repo_name (stringlengths 6-96) | path (stringlengths 4-191) | copies (stringclasses, 322 values) | size (stringlengths 4-6) | content (stringlengths 762-753k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
BhallaLab/moose-full | moose-examples/tutorials/ExcInhNet/ExcInhNet_Ostojic2014_Brunel2000_brian.py | 2 | 8580 | #!/usr/bin/env python
'''
The LIF network is based on:
Ostojic, S. (2014).
Two types of asynchronous activity in networks of
excitatory and inhibitory spiking neurons.
Nat Neurosci 17, 594-600.
Key parameter to change is synaptic coupling J (mV).
Tested with Brian 1.4.1
Written by Aditya Gilra, CAMP 2014, Bangalore, 20 June, 2014.
Updated to match MOOSE implementation by Aditya Gilra, Jan, 2015.
Currently, simtime and dt are modified to compare across MOOSE, Brian1 and Brian2.
'''
#import modules and functions to be used
from brian import * # importing brian also does 'from pylab import *',
# which brings matplotlib/pylab-style plotting commands into the namespace;
# np. (numpy) and mpl. (matplotlib) are available as well
import random
import time
np.random.seed(100) # set seed for reproducibility of simulations
random.seed(100) # set seed for reproducibility of simulations
# ###########################################
# Simulation parameters
# ###########################################
simdt = 0.01*ms
simtime = 10.0*second # Simulation time
defaultclock.dt = simdt # Brian's default sim time step
dt = defaultclock.dt/second # convert to value in seconds
clocknrn = Clock(dt=simdt,order=0)
clocksyn = Clock(dt=simdt,order=1)
# ###########################################
# Neuron model
# ###########################################
# equation: dv/dt=(1/taum)*(-(v-el))
# with spike when v>vt, reset to vr
el = -65.*mV # Resting potential
vt = -45.*mV # Spiking threshold
taum = 20.*ms # Membrane time constant
vr = -55.*mV # Reset potential
inp = 20.1*mV/taum # input I/C to each neuron
# same as setting el=-41 mV and inp=0
taur = 0.5*ms # Refractory period
taudelay = 0.5*ms + dt*second # synaptic delay
eqs_neurons='''
dv/dt=(1/taum)*(-(v-el))+inp : volt
'''
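# --- Editor's illustrative sketch (not part of the original Brian script) ---
# Minimal forward-Euler integration of the single-neuron LIF equation above,
# dv/dt = -(v - el)/taum + inp, with a spike when v >= vt and reset to vr
# (refractory period and synapses omitted). Plain floats in mV and ms are
# used instead of Brian quantities; the helper name is made up.
def _lif_euler_sketch(t_stop_ms=200.0, dt_ms=0.01):
    el_, vt_, vr_, taum_ = -65.0, -45.0, -55.0, 20.0   # mV, mV, mV, ms
    inp_ = 20.1 / taum_                                # mV/ms, mirrors inp above
    v, spike_times = el_, []
    for step in range(int(t_stop_ms / dt_ms)):
        v += dt_ms * (-(v - el_) / taum_ + inp_)
        if v >= vt_:
            spike_times.append(step * dt_ms)
            v = vr_
    return spike_times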
# ###########################################
# Network parameters: numbers
# ###########################################
N = 1000 # Total number of neurons
fexc = 0.8 # Fraction of exc neurons
NE = int(fexc*N) # Number of excitatory cells
NI = N-NE # Number of inhibitory cells
# ###########################################
# Network parameters: synapses
# ###########################################
C = 100 # Number of incoming connections on each neuron (exc or inh)
fC = fexc # fraction fC incoming connections are exc, rest inhibitory
excC = int(fC*C) # number of exc incoming connections
J = 0.8*mV # exc strength is J (in mV as we add to voltage)
# Critical J is ~ 0.45 mV in paper for N = 1000, C = 1000
g = 5.0 # -gJ is the inh strength. For exc-inh balance g>~f(1-f)=4
# ###########################################
# Initialize neuron (sub)groups
# ###########################################
neurons=NeuronGroup(N,model=eqs_neurons,\
threshold='v>=vt',reset=vr,refractory=taur,clock=clocknrn)
Pe=neurons.subgroup(NE)
Pi=neurons.subgroup(NI)
# not distributing uniformly to ensure match with MOOSE
#Pe.v = uniform(el,vt+10*mV,NE)
#Pi.v = uniform(el,vt+10*mV,NI)
neurons.v = linspace(el/mV-20,vt/mV,N)*mV
# ###########################################
# Connecting the network
# ###########################################
sparseness_e = fC*C/float(NE)
sparseness_i = (1-fC)*C/float(NI)
# Follow Dale's law -- exc (inh) neurons only have +ve (-ve) synapses.
con_e = Synapses(Pe,neurons,'',pre='v_post+=J',clock=clocksyn)
con_i = Synapses(Pi,neurons,'',pre='v_post+=-g*J',clock=clocksyn)
# I don't use Brian's connect_random,
# instead I use the same algorithm and seed as in the MOOSE version
#con_e.connect_random(sparseness=sparseness_e)
#con_i.connect_random(sparseness=sparseness_i)
## Connections from some Exc/Inh neurons to each neuron
random.seed(100) # set seed for reproducibility of simulations
for i in range(0,N):
## draw excC number of neuron indices out of NmaxExc neurons
preIdxs = random.sample(range(NE),excC)
## connect these presynaptically to i-th post-synaptic neuron
for synnum,preIdx in enumerate(preIdxs):
con_e[preIdx,i]=True
## draw inhC=C-excC number of neuron indices out of inhibitory neurons
preIdxs = random.sample(range(N-NE),C-excC)
## connect these presynaptically to i-th post-synaptic neuron
for synnum,preIdx in enumerate(preIdxs):
con_i[preIdx,i]=True
con_e.delay = taudelay
con_i.delay = taudelay
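# --- Editor's illustrative sketch (not part of the original script) ---
# The loop above builds fixed in-degree connectivity: each postsynaptic
# neuron draws exactly excC excitatory and C-excC inhibitory presynaptic
# indices, and re-seeding Python's `random` makes the draw reproducible
# across the MOOSE/Brian implementations. A minimal standalone version:
def _fixed_indegree_sketch(n_post=4, n_pre=10, in_degree=3, seed=100):
    import random as _random
    _random.seed(seed)
    return [_random.sample(range(n_pre), in_degree) for _ in range(n_post)]
# Calling this twice with the same seed yields identical connection lists.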
# ###########################################
# Setting up monitors
# ###########################################
Nmon = N
Nmon_exc = int(fexc*Nmon)
Pe_mon = Pe.subgroup(Nmon_exc)
sm_e = SpikeMonitor(Pe_mon)
Pi_mon = Pi.subgroup(Nmon-Nmon_exc)
sm_i = SpikeMonitor(Pi_mon)
# Population monitor
popm_e = PopulationRateMonitor(Pe,bin=1.*ms)
popm_i = PopulationRateMonitor(Pi,bin=1.*ms)
# voltage monitor
sm_e_vm = StateMonitor(Pe,'v',record=range(10),clock=clocknrn)
# ###########################################
# Simulate
# ###########################################
print "Setup complete, running for",simtime,"at dt =",dt,"s."
t1 = time.time()
run(simtime,report='text')
print 'inittime + runtime, t = ', time.time() - t1
print "For g,J =",g,J,"mean exc rate =",\
sm_e.nspikes/float(Nmon_exc)/(simtime/second),'Hz.'
print "For g,J =",g,J,"mean inh rate =",\
sm_i.nspikes/float(Nmon-Nmon_exc)/(simtime/second),'Hz.'
# ###########################################
# Analysis functions
# ###########################################
def rate_from_spiketrain(spiketimes,fulltime,dt,tau=50e-3):
"""
Returns a rate series of spiketimes convolved with a Gaussian kernel;
all times must be in SI units,
remember to divide fulltime and dt by second
"""
sigma = tau/2.
# normalized Gaussian kernel, integral with dt is normed to 1
# to count as 1 spike smeared over a finite interval
norm_factor = 1./(sqrt(2.*pi)*sigma)
gauss_kernel = array([norm_factor*exp(-x**2/(2.*sigma**2))\
for x in arange(-5.*sigma,5.*sigma+dt,dt)])
kernel_len = len(gauss_kernel)
# need to accommodate half kernel_len on either side of fulltime
rate_full = zeros(int(fulltime/dt)+kernel_len)
for spiketime in spiketimes:
idx = int(spiketime/dt)
rate_full[idx:idx+kernel_len] += gauss_kernel
# return only the middle fulltime part of the rate series.
# This is already in Hz: the convolution above should be multiplied by dt
# and the conversion from counts to a rate would divide by dt, so the two
# factors cancel and neither is applied explicitly.
return rate_full[kernel_len/2:kernel_len/2+int(fulltime/dt)]
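# --- Editor's illustrative check (not part of the original analysis) ---
# A single spike convolved with the normalized kernel above integrates to ~1,
# which is why the returned series can be read directly as a rate in Hz.
def _rate_kernel_check(dt=0.001):
    r = rate_from_spiketrain([0.5], fulltime=1.0, dt=dt)
    return r.sum() * dt  # expected to be close to 1.0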
# ###########################################
# Make plots
# ###########################################
fig = figure()
# Vm plots
timeseries = arange(0,simtime/second+dt,dt)
for i in range(3):
plot(timeseries[:len(sm_e_vm[i])],sm_e_vm[i])
fig = figure()
# raster plots
subplot(231)
raster_plot(sm_e,ms=1.)
title(str(Nmon_exc)+" exc neurons")
xlabel("")
xlim([0,simtime/ms])
subplot(234)
raster_plot(sm_i,ms=1.)
title(str(Nmon-Nmon_exc)+" inh neurons")
subplot(232)
# firing rates
timeseries = arange(0,simtime/second+dt,dt)
num_to_plot = 10
#rates = []
for nrni in range(num_to_plot):
rate = rate_from_spiketrain(sm_e[nrni],simtime/second,dt)
plot(timeseries[:len(rate)],rate)
#print mean(rate),len(sm_e[nrni])
#rates.append(rate)
title(str(num_to_plot)+" exc rates")
ylabel("Hz")
ylim(0,300)
subplot(235)
for nrni in range(num_to_plot):
rate = rate_from_spiketrain(sm_i[nrni],simtime/second,dt)
plot(timeseries[:len(rate)],rate)
#print mean(rate),len(sm_i[nrni])
#rates.append(rate)
title(str(num_to_plot)+" inh rates")
ylim(0,300)
#print "Mean rate = ",mean(rates)
xlabel("Time (s)")
ylabel("Hz")
# Population firing rates
subplot(233)
timeseries = arange(0,simtime/second,dt)
allspikes = []
for nrni in range(NE):
allspikes.extend(sm_e[nrni])
#plot(timeseries,popm_e.smooth_rate(width=50.*ms,filter="gaussian"),color='grey')
rate = rate_from_spiketrain(allspikes,simtime/second,dt)/float(NE)
plot(timeseries[:len(rate)],rate)
title("Exc population rate")
ylabel("Hz")
subplot(236)
timeseries = arange(0,simtime/second,dt)
allspikes = []
for nrni in range(NI):
allspikes.extend(sm_i[nrni])
#plot(timeseries,popm_i.smooth_rate(width=50.*ms,filter="gaussian"),color='grey')
rate = rate_from_spiketrain(allspikes,simtime/second,dt)/float(NI)
plot(timeseries[:len(rate)],rate)
title("Inh population rate")
xlabel("Time (s)")
ylabel("Hz")
fig.tight_layout()
show()
| gpl-2.0 |
stormsson/procedural_city_generation_wrapper | vendor/josauder/procedural_city_generation/roadmap/Vertex.py | 2 | 2610 | from procedural_city_generation.additional_stuff.Singleton import Singleton
try:
from procedural_city_generation.roadmap.main import gui as plt
if plt is None:
import matplotlib.pyplot as plt
except:
import matplotlib.pyplot as plt
plotbool=False
singleton=Singleton("roadmap")
class Vertex(object):
"""
Vertex (named after the term from mathematical graph theory) object used in the roadmap submodule.
Has the following attributes:
- coords : numpy.ndarray(2, )
XY-Coordinates of this Vertex
- neighbours : list<procedural_city_generation.roadmap.Vertex>
List of all Vertices that this Vertex is currently connected to (has a road to)
- minor_road : boolean
Describes whether this road is a minor road
- seed : boolean
Describes whether this (major) road is a seed
"""
def __init__(self, coords):
"""
Parameters
----------
coords : numpy.array(2, )
XY-Coordinates of this Vertex
"""
self.coords=coords
self.neighbours=[]
self.minor_road=False
self.seed=False
def __cmp__(self, other):
if isinstance(other, Vertex):
if self.coords[0]>other.coords[0]:
return 1
elif self.coords[0]<other.coords[0]:
return -1
else:
if self.coords[1]>other.coords[1]:
return 1
elif self.coords[1]<other.coords[1]:
return -1
return 0
def connection(self, other):
"""
Manages connections so that no Vertex has two connections to
the same other Vertex. Also responsible for plotting this Vertex
in matplotlib if the "plot" parameter in /inputs/roadmap.conf
is set to True.
Parameters
----------
other : procedural_city_generation.roadmap.Vertex object
The vertex that this vertex is going to be connected to.
"""
if other not in self.neighbours:
self.neighbours.append(other)
if self not in other.neighbours:
other.neighbours.append(self)
if plotbool:
col='black'
width=3
if self.minor_road or other.minor_road:
col='blue'
width=1
plt.plot([self.coords[0], other.coords[0]], [self.coords[1], other.coords[1]], color=col, linewidth=width)
def __repr__(self):
return "Vertex"+str(self.coords)+"\n"
def set_plotbool(singletonbool):
global plotbool
plotbool=singletonbool
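# --- Editor's illustrative sketch (not part of the original module) ---
# Vertex.connection() keeps the adjacency symmetric and duplicate-free:
# connecting the same pair twice leaves exactly one entry on each side.
# Assumes the module imports above resolve; plotting stays disabled.
def _vertex_connection_sketch():
    import numpy as np
    a, b = Vertex(np.array([0.0, 0.0])), Vertex(np.array([1.0, 0.0]))
    a.connection(b)
    a.connection(b)  # second call is a no-op for both neighbour lists
    return len(a.neighbours), len(b.neighbours)  # (1, 1)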
| mpl-2.0 |
opencleveland/RTAHeatMap | DataGeneration/DatabaseHandler.py | 2 | 7778 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlite3 as sql
import pandas as pd
from DataGeneration.MapLocation import MapLocation
class DatabaseHandler:
def __init__(self, db_file_name='db.sqlite3', full=True):
if full:
self.conn = sql.connect(db_file_name)
self.initialize_db()
def initialize_db(self):
self._add_addresses_table()
self._add_stops_table()
self._add_routes_table()
self.conn.commit()
def _add_addresses_table(self):
c = self.conn.cursor()
c.execute("""
CREATE TABLE IF NOT EXISTS addresses
(id INTEGER PRIMARY KEY,
latitude real NOT NULL,
longitude real NOT NULL)
""")
c.close()
def _add_stops_table(self):
c = self.conn.cursor()
c.execute("""
CREATE TABLE IF NOT EXISTS stops
(id INTEGER PRIMARY KEY,
stop_id INTEGER NOT NULL,
stop_name text NOT NULL,
latitude real NOT NULL,
longitude real NOT NULL)
""")
c.close()
def _add_routes_table(self):
c = self.conn.cursor()
c.execute("""
CREATE TABLE IF NOT EXISTS routes
(id INTEGER PRIMARY KEY,
address_id INTEGER NOT NULL,
stop_id INTEGER NOT NULL,
distance INTEGER NOT NULL,
time INTEGER NOT NULL,
FOREIGN KEY(address_id) REFERENCES addresses(id),
FOREIGN KEY(stop_id) REFERENCES stops(id))
""")
c.close()
def add_addresses_from_file(self, file_name):
df = pd.read_csv(file_name)
df.to_sql('addresses', self.conn, if_exists='append', index=False)
def add_stops_from_file(self, file_name):
df = pd.read_csv(file_name)
df = df[["stop_id", "stop_name", "longitude", "latitude"]]
df.to_sql('stops', self.conn, if_exists='append', index=False)
def add_address(self, location):
if not hasattr(location, 'latitude'):
raise TypeError('location must have latitude property')
if not hasattr(location, 'longitude'):
raise TypeError('location must have longitude property')
c = self.conn.cursor()
if location.id != 0:
c.execute("INSERT INTO addresses (id, latitude, longitude) "
"VALUES (?, ?, ?)",
(location.id, location.latitude, location.longitude))
else:
c.execute("INSERT INTO addresses (latitude, longitude) "
"VALUES (?, ?)", (location.latitude, location.longitude))
self.conn.commit()
c.close()
def add_stop(self, location):
if not hasattr(location, 'latitude'):
raise TypeError('location must have latitude property')
if not hasattr(location, 'longitude'):
raise TypeError('location must have longitude property')
c = self.conn.cursor()
if location.id != 0:
c.execute("INSERT INTO stops (id, latitude, longitude) "
"VALUES (?, ?, ?)",
(location.id, location.latitude, location.longitude))
else:
c.execute("INSERT INTO stops (latitude, longitude) "
"VALUES (?, ?)",
(location.latitude, location.longitude))
self.conn.commit()
c.close()
def add_route(self, address, stop, distance, time):
c = self.conn.cursor()
c.execute("INSERT INTO routes "
"(address_id, stop_id, distance, time) "
"VALUES (?, ?, ?, ?)",
(address, stop, distance, time))
self.conn.commit()
c.close()
# Information Retrieval
def get_address_generator(self, verbose=False):
c = self.conn.cursor()
c.execute("SELECT "
"addresses.latitude, addresses.longitude, addresses.id "
"FROM addresses LEFT JOIN routes "
"ON routes.address_id = addresses.id "
"WHERE routes.id IS NULL")
if verbose:
print("fetching all addresses without routes...")
rows = c.fetchall()
c.close()
if verbose:
print("fetched {} addresses".format(len(rows)))
for row in rows:
yield MapLocation(latitude=row[0], longitude=row[1], id=row[2])
def get_all_stops(self):
c = self.conn.cursor()
c.execute("SELECT * from stops")
rows = c.fetchall()
c.close()
return [MapLocation(latitude=row[3], longitude=row[4], id=row[0])
for row in rows]
def output_routes(self, file_path, closest_stops_only=False):
"""
Args:
file_path (str): the file path to save the .csv output
closest_stops_only (bool): If true, only save the stops that are the
closest (by the distance column) to each address. If there are
multiple stops per address that have the same distance, both are
returned.
Returns:
None; writes a .csv file at ``file_path`` with the following columns:
address_latitude
address_longitude
stop_latitude
stop_longitude
distance
time
This dataframe contains each route in the database that has been
collected by the begin() function of the DataGenerator class. If the
closest_stops_only parameter is set to true, then the output only
contains stops for each address which are equal to minimum distance
for routes associated with specific addresses. This means that it
can return multiple stops per address if their associated routes
have equal distances.
"""
if closest_stops_only:
return self.routes_dataframe_closest_stops().to_csv(file_path)
else:
return self.routes_dataframe().to_csv(file_path)
def routes_dataframe(self):
"""
Returns:
all routes along with the stop and address latitudes and longitudes
as a pandas DataFrame.
"""
return pd.read_sql_query(
"SELECT "
"addresses.latitude AS address_latitude,"
"addresses.longitude AS address_longitude,"
"stops.latitude AS stop_latitude,"
"stops.longitude AS stop_longitude,"
"routes.distance AS distance,"
"routes.time AS time "
"FROM routes "
"LEFT JOIN addresses ON routes.address_id = addresses.id "
"LEFT JOIN stops ON routes.stop_id = stops.id",
self.conn)
def routes_dataframe_closest_stops(self):
"""
Collects all routes and groups them by address. Returns the nearest stop
to each address as well as the time and distance to that stop. Sorts by
distance.
"""
df = self.routes_dataframe()
df_grouped = df.groupby(['address_latitude', 'address_longitude']).\
agg({'distance': 'min'})
df_grouped = df_grouped.reset_index()
df_grouped = df_grouped.rename(columns={'distance':'distance_min'})
df = pd.merge(df, df_grouped, how='left',
on=['address_latitude', 'address_longitude'])
df = df[df['distance'] == df['distance_min']]
return df[['address_latitude', 'address_longitude',
'stop_latitude', 'stop_longitude',
'distance', 'time']].reset_index()
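# --- Editor's illustrative sketch (not part of the original module) ---
# The nearest-stop selection above is a plain groupby-min plus merge pattern;
# a self-contained toy version on a tiny hand-made DataFrame:
def _closest_stop_pattern_demo():
    import pandas as pd
    df = pd.DataFrame({'address_latitude': [1, 1, 2, 2],
                       'address_longitude': [1, 1, 2, 2],
                       'stop_latitude': [10, 11, 12, 13],
                       'stop_longitude': [10, 11, 12, 13],
                       'distance': [300, 100, 250, 250],
                       'time': [5, 2, 4, 4]})
    mins = (df.groupby(['address_latitude', 'address_longitude'])
            .agg({'distance': 'min'}).reset_index()
            .rename(columns={'distance': 'distance_min'}))
    out = pd.merge(df, mins, how='left',
                   on=['address_latitude', 'address_longitude'])
    return out[out['distance'] == out['distance_min']]  # one row + two tied rows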
| mit |
KarlTDebiec/myplotspec | text.py | 1 | 16328 | # -*- coding: utf-8 -*-
# myplotspec.text.py
#
# Copyright (C) 2015-2017 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Functions for formatting text.
.. todo:
- Resolve inconsistencies in how positions are determined, without
conflicting with matplotlib's style. Most likely, x and y should
retain their matplotlib behavior, being proportions when used with
figures and coordinates when used with subplots. Text functions
below should support xpro and ypro that are always proportions, xabs
and yabs that are always absolute coordinates, for subplots xcrd and
ycrd in subplot coordinates, and finally left, right, top, and
bottom in inches.
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
################################## FUNCTIONS ##################################
def set_title(figure_or_subplot, verbose=1, debug=0, *args, **kwargs):
"""
Draws a title on a Figure or subplot.
Arguments:
figure_or_subplot (Figure, Axes): Object on which to draw title
title (str): Title text
title_fp (str, dict, FontProperties): Title font
top (float): Distance between top of figure and title (inches);
Figure title only
title_kw (dict): Keyword arguments passed to ``Figure.suptitle()``
or ``Axes.set_title()``
Returns:
(*Text*): Title
.. todo:
- If top is negative, use distance from highest subplot
"""
from warnings import warn
import matplotlib
from . import (FP_KEYS, get_edges, get_font, multi_get_copy, multi_pop)
# Determine title and keyword arguments
title_kw = multi_get_copy("title_kw", kwargs, {})
title = multi_get_copy("title", kwargs)
# Determine font and other settings
title_fp = multi_get_copy("title_fp", kwargs)
title_fp_2 = multi_pop(["title_fp"] + FP_KEYS, title_kw)
if title_fp_2 is not None:
title_kw["fontproperties"] = get_font(title_fp_2)
elif title_fp is not None:
title_kw["fontproperties"] = get_font(title_fp)
title_kw["horizontalalignment"] = multi_pop(["horizontalalignment", "ha"],
title_kw, "center")
title_kw["verticalalignment"] = multi_pop(["verticalalignment", "va"],
title_kw, "center")
# Determine drawing target and title
if isinstance(figure_or_subplot, matplotlib.figure.Figure):
figure = figure_or_subplot
fig_width = figure.get_figwidth()
fig_height = figure.get_figheight()
edges = get_edges(figure)
title_2 = multi_pop(["title", "t"], title_kw)
if title_2 is not None:
title_kw["t"] = title_2
elif title is not None:
title_kw["t"] = title
elif len(args) >= 1:
title_kw["t"] = args[0]
else:
return None
if "left" in title_kw:
left = title_kw.pop("left")
title_kw["x"] = left / fig_width
elif "right" in title_kw:
right = title_kw.pop("right")
title_kw["x"] = (fig_width - right) / fig_width
elif "x" not in title_kw:
title_kw["x"] = (edges["left"] + edges["right"]) / 2
if "top" in title_kw:
top = title_kw.pop("top")
if top > 0:
title_kw["y"] = (fig_height - top) / fig_height
else:
title_kw["y"] = (
(edges["top"] * fig_height) - top) / fig_height
if "fontproperties" in title_kw:
if verbose >= 2:
warn("matplotlib's figure.suptitle method currently supports "
"setting only only font size and weight, other font "
"settings may be lost.")
fontproperties = title_kw.pop("fontproperties")
title_kw["size"] = fontproperties.get_size()
title_kw["weight"] = fontproperties.get_weight()
return figure.suptitle(**title_kw)
elif isinstance(figure_or_subplot, matplotlib.axes.Axes):
subplot = figure_or_subplot
title_2 = multi_pop(["title", "label"], title_kw)
if title_2 is not None:
title_kw["label"] = title_2
elif title is not None:
title_kw["label"] = title
elif len(args) >= 1:
title_kw["label"] = args[0]
else:
return None
return subplot.set_title(**title_kw)
def set_shared_xlabel(figure_or_subplots, *args, **kwargs):
"""
Draws an x axis label shared by multiple subplots.
The horizontal position of the shared x label is by default the
center of the selected subplots, and the vertical position is
halfway between the bottommost subplot and the bottom of the figure.
Arguments:
figure_or_subplots (Figure, OrderedDict): Subplots to use to
calculate label horizontal position; if Figure, all subplots
present on figure are used
[shared_][x]label (str): Label text
[shared_][x]label_fp (str, dict, FontProperties): Label font
[shared_][x]label_kw (dict): Keyword arguments passed to
:func:`set_text`
bottom (float): Distance between bottom of figure and label
(inches); if negative, distance between bottommost plot and
label
top (float): Distance between top of figure and label (inches); if
negative, distance between topmost subplot and label; overrides
``bottom``
x (float): X position within figure (proportion 0.0-1.0); default
= center of selected subplots
y (float): Y position within figure (proportion 0.0-1.0);
overrides ``bottom`` and ``top``; default = halfway between
bottommost subplot and bottom of figure
Returns:
(Text): X axis label
"""
import matplotlib
from . import (FP_KEYS, get_edges, get_font, multi_get_copy, multi_pop)
# Determine label and keyword arguments
label_kw = multi_get_copy(["shared_xlabel_kw", "xlabel_kw", "label_kw"],
kwargs, {})
label = multi_get_copy(["shared_xlabel", "xlabel", "label"], kwargs)
label_2 = multi_pop(["shared_xlabel", "xlabel", "label", "s"], label_kw)
if label_2 is not None:
label_kw["s"] = label_2
elif label is not None:
label_kw["s"] = label
elif len(args) >= 1:
label_kw["s"] = args[0]
else:
return None
# Determine font and other settings
label_fp = multi_get_copy(
["shared_xlabel_fp", "xlabel_fp", "label_fp"] + FP_KEYS, kwargs)
label_fp_2 = multi_pop(
["shared_xlabel_fp", "xlabel_fp", "label_fp"] + FP_KEYS, label_kw)
if label_fp_2 is not None:
label_kw["fontproperties"] = get_font(label_fp_2)
elif label_fp is not None:
label_kw["fontproperties"] = get_font(label_fp)
label_kw["horizontalalignment"] = multi_pop(["horizontalalignment", "ha"],
label_kw, "center")
label_kw["verticalalignment"] = multi_pop(["verticalalignment", "va"],
label_kw, "center")
# x and y are specified in relative figure coordinates
# top and bottom are specified in inches
if isinstance(figure_or_subplots, matplotlib.figure.Figure):
figure = figure_or_subplots
edges = get_edges(figure)
elif isinstance(figure_or_subplots, dict):
subplots = figure_or_subplots
figure = subplots.values()[0].get_figure()
edges = get_edges(subplots)
if "x" not in label_kw:
label_kw["x"] = (edges["left"] + edges["right"]) / 2
if "y" not in label_kw:
fig_height = figure.get_figheight()
if "top" in label_kw:
top = label_kw.pop("top")
if top >= 0:
label_kw["y"] = (fig_height - top) / fig_height
else:
label_kw["y"] = (
((edges["top"] * fig_height) + top) / fig_height)
elif "bottom" in label_kw:
bottom = label_kw.pop("bottom")
if bottom >= 0:
label_kw["y"] = bottom / fig_height
else:
label_kw["y"] = (
((edges["bottom"] * fig_height) + bottom) / fig_height)
else:
label_kw["y"] = edges["bottom"] / 2
return set_text(figure, text_kw=label_kw, **kwargs)
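# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal use of set_shared_xlabel, assuming the surrounding myplotspec
# package (get_edges, multi_get_copy, ...) is importable; positions follow
# the docstring above: `bottom` is in inches, x/y in figure proportions.
def _demo_shared_xlabel():
    import matplotlib.pyplot as plt
    figure, axes = plt.subplots(1, 2, figsize=(6, 3))
    subplots = {0: axes[0], 1: axes[1]}
    return set_shared_xlabel(subplots, shared_xlabel="Time (s)",
                             shared_xlabel_kw={"bottom": 0.25})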
def set_shared_ylabel(figure_or_subplots, *args, **kwargs):
"""
Draws a y-axis label shared by multiple subplots.
The vertical position of the shared y label is by default the
center of the selected subplots, and the horizontal position is
halfway between the leftmost subplot and the left edge of the
figure.
Arguments:
figure_or_subplots (Figure, OrderedDict): Subplots to use to
calculate label vertical position; if Figure, all subplots
present on figure are used
[shared_][y]label (str): Label text
[shared_][y]label_fp (str, dict, FontProperties): Label font
[shared_][y]label_kw (dict): Keyword arguments passed to
:func:`set_text`
left (float): Distance between left edge of figure and label
(inches); if negative, distance between leftmost plot and label
right (float): Distance between right edge of figure and label
(inches); if negative, distance between rightmost plot and
label; overrides ``left``
x (float): X position within figure (proportion 0.0-1.0);
overrides ``left`` and ``right``; default = halfway between
leftmost subplot and left edge of figure
y (float): Y position within figure (proportion 0.0-1.0); default
= center of selected subplots
Returns:
(Text): Y axis label
"""
import matplotlib
from . import (FP_KEYS, get_edges, get_font, multi_get_copy, multi_pop)
# Determine label and keyword arguments
label_kw = multi_get_copy(["shared_ylabel_kw", "ylabel_kw", "label_kw"],
kwargs, {})
label = multi_get_copy(["shared_ylabel", "ylabel", "label"], kwargs)
label_2 = multi_pop(["shared_ylabel", "ylabel", "label", "s"], label_kw)
if label_2 is not None:
label_kw["s"] = label_2
elif label is not None:
label_kw["s"] = label
elif len(args) >= 1:
label_kw["s"] = args[0]
else:
return None
# Determine font and other settings
label_fp = multi_get_copy(
["shared_ylabel_fp", "ylabel_fp", "label_fp"] + FP_KEYS, kwargs)
label_fp_2 = multi_pop(
["shared_ylabel_fp", "ylabel_fp", "label_fp"] + FP_KEYS, label_kw)
if label_fp_2 is not None:
label_kw["fontproperties"] = get_font(label_fp_2)
elif label_fp is not None:
label_kw["fontproperties"] = get_font(label_fp)
label_kw["horizontalalignment"] = multi_pop(["horizontalalignment", "ha"],
label_kw, "center")
label_kw["verticalalignment"] = multi_pop(["verticalalignment", "va"],
label_kw, "center")
label_kw["rotation"] = label_kw.pop("rotation", 90)
# Determine location
if isinstance(figure_or_subplots, matplotlib.figure.Figure):
figure = figure_or_subplots
edges = get_edges(figure)
elif isinstance(figure_or_subplots, dict):
subplots = figure_or_subplots
figure = subplots.values()[0].get_figure()
edges = get_edges(subplots)
# x and y are specified in relative figure coordinates
# left and right are specified in inches
if "x" not in label_kw:
fig_width = figure.get_figwidth()
if "left" in label_kw:
left = label_kw.pop("left")
if left < 0:
label_kw["x"] = (
((edges["left"] * fig_width) + left) / fig_width)
else:
label_kw["x"] = left / fig_width
elif "right" in label_kw:
right = label_kw.pop("right")
if right < 0:
label_kw["x"] = (
((edges["right"] * fig_width) - right) / fig_width)
else:
label_kw["x"] = (fig_width - right) / fig_width
else:
label_kw["x"] = edges["left"] / 2
if "y" not in label_kw:
label_kw["y"] = (edges["bottom"] + edges["top"]) / 2
return set_text(figure, text_kw=label_kw, **kwargs)
def set_label(subplot, *args, **kwargs):
"""
"""
from . import (FP_KEYS, get_edges, get_font, multi_get_copy, multi_pop)
# Determine label and keyword arguments
label_kw = multi_get_copy("label_kw", kwargs, {})
label = multi_get_copy("label", kwargs)
label_2 = multi_pop(["label", "s"], label_kw)
if label_2 is not None:
label_kw["s"] = label_2
elif label is not None:
label_kw["s"] = label
elif len(args) >= 1:
label_kw["s"] = args[0]
else:
return None
# Determine font and other settings
label_fp = multi_get_copy("label_fp", kwargs)
label_fp_2 = multi_pop(["label_fp"] + FP_KEYS, label_kw)
if label_fp_2 is not None:
label_kw["fontproperties"] = get_font(label_fp_2)
elif label_fp is not None:
label_kw["fontproperties"] = get_font(label_fp)
# x and y are specified in data, proportional, or absolute coordinates
if ("x" in label_kw and "y" in label_kw and label_kw["x"] is not None and
label_kw["y"] is not None):
pass
elif ("xpro" in label_kw and "ypro" in label_kw and label_kw[
"xpro"] is not None and label_kw["ypro"] is not None):
label_kw["x"] = label_kw.pop("xpro")
label_kw["y"] = label_kw.pop("ypro")
kwargs["transform"] = subplot.transAxes
elif ("xabs" in label_kw and "yabs" in label_kw and label_kw[
"xabs"] is not None and label_kw["yabs"] is not None):
edges = get_edges(subplot, absolute=True)
xabs = label_kw.pop("xabs")
yabs = label_kw.pop("yabs")
if xabs > 0:
label_kw["x"] = xabs / edges["width"]
else:
label_kw["x"] = (edges["width"] + xabs) / edges["width"]
if yabs > 0:
label_kw["y"] = yabs / edges["height"]
else:
label_kw["y"] = (edges["height"] + yabs) / edges["height"]
kwargs["transform"] = subplot.transAxes
if "border_lw" in label_kw:
kwargs["border_lw"] = label_kw.pop("border_lw")
return set_text(subplot, text_kw=label_kw, **kwargs)
def set_text(figure_or_subplot, *args, **kwargs):
"""
Draws text on a figure or subplot.
Arguments:
figure_or_subplot (Figure, Axes): Object on which to draw
text (str): Text
text_fp (str, dict, FontProperties): Text font
text_kw (dict): Keyword arguments passed to ``text()``
Returns:
(Text): Text
"""
from matplotlib import patheffects
from . import (FP_KEYS, get_colors, get_font, multi_get_copy, multi_pop)
# Determine text and keyword arguments
text_kw = multi_get_copy("text_kw", kwargs, {})
get_colors(text_kw)
text = multi_get_copy(["text", "s"], kwargs)
text_2 = multi_pop(["text", "s"], text_kw)
if text_2 is not None:
text_kw["s"] = text_2
elif text is not None:
text_kw["s"] = text
elif len(args) >= 1:
text_kw["s"] = args[0]
else:
return None
# Determine font settings
text_fp = multi_get_copy(["text_fp"] + FP_KEYS, kwargs)
text_fp_2 = multi_pop(["text_fp"] + FP_KEYS, text_kw)
if text_fp_2 is not None:
text_kw["fontproperties"] = get_font(text_fp_2)
elif text_fp is not None:
text_kw["fontproperties"] = get_font(text_fp)
# x and y
x = multi_get_copy("x", kwargs)
if x is not None and not "x" in text_kw:
text_kw["x"] = x
y = multi_get_copy("y", kwargs)
if y is not None and not "y" in text_kw:
text_kw["y"] = y
# Transform
if "transform" in kwargs:
text_kw["transform"] = kwargs.pop("transform")
# Draw text
text = figure_or_subplot.text(**text_kw)
# Draw border
border_lw = multi_get_copy("border_lw", kwargs)
if border_lw is not None:
text.set_path_effects(
[patheffects.Stroke(linewidth=border_lw, foreground="w"),
patheffects.Normal()])
return text
| bsd-3-clause |
jdmonaco/vmo-feedback-model | src/figures/remapping.py | 1 | 4689 | #encoding: utf-8
"""
remapping -- Remapping figure showing orthogonalization from initial phase reset
Created by Joe Monaco on 2010-10-12.
Copyright (c) 2009-2011 Johns Hopkins University. All rights reserved.
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php.
"""
# Library imports
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
# Package imports
from ..core.analysis import BaseAnalysis
from ..vmo import VMOModel
from ..session import VMOSession
from ..compare import (correlation_matrix, correlation_diagonals,
population_spatial_correlation)
from ..tools.images import array_to_image
from ..tools.radians import circle_diff_vec
class RemappingFigure(BaseAnalysis):
"""
Run complete remapping experiment based on random initial reset
"""
label = "remapping"
def collect_data(self, N_samples=2, **kwargs):
"""Run basic VMOModel remapping experiment by randomly initializing
the phase code of a network of oscillators and place units.
Keyword arguments:
N_samples -- total number of simulations to run (N-1 remapped from 1st)
Additional keyword arguments are passed on to VMOModel.
"""
self.results['N_samples'] = N_samples
# Set up model parameters
pdict = dict( N_outputs=500,
N_theta=1000,
N_cues=1,
C_W=0.05,
gamma_local=0,
gamma_distal=0,
num_trials=N_samples,
refresh_fixed_points=False )
pdict.update(kwargs)
# Set up and run the path integration model
self.out('Running remapping simulations...')
model = VMOModel(**pdict)
model.advance_all()
sessions = VMOSession.get_session_list(model)
VMOSession.save_session_list(sessions,
os.path.join(self.datadir, 'samples'))
# Get unit ordering based on first environment
sortix = list(sessions[0].sortix)
sortix += list(set(range(sessions[0].num_units)) - set(sortix))
self.results['sortix'] = np.array(sortix)
# Save multi-session population responses and activity patterns
self.out('Computing and storing population responses...')
R = [SD.get_population_matrix(clusters=sortix) for SD in sessions]
np.save(os.path.join(self.datadir, 'R.npy'), np.asarray(R))
# Good-bye
self.out('All done!')
def create_plots(self, N_examples=4, examples=None):
"""Create figure(s) with basic data panels
"""
# Change to data directoary and start logging
os.chdir(self.datadir)
self.out.outfd = file('figure.log', 'w')
# Set up main figure for plotting
self.figure = {}
figsize = 9, 12
plt.rcParams['figure.figsize'] = figsize
self.figure['remapping'] = f = plt.figure(figsize=figsize)
f.suptitle(self.label.title())
# Load the data
R = np.load(os.path.join(self.datadir, 'R.npy'))
N = self.results['N_samples']
# Example active unit responses across environments
if examples is None:
active = set()
for j in xrange(N):
active = active.union(set((R[j].max(axis=1)>=1).nonzero()[0]))
active = list(active)
active.sort()
examples = np.random.permutation(len(active))[:N_examples]
examples = np.array(active)[examples]
self.out('Plotting example responses: %s'%repr(examples))
for i,ex in enumerate(examples):
self.out('Unit %d max response = %.2f Hz'%(ex, R[:,ex].max()))
for j in xrange(N):
ax = plt.subplot(2*N_examples, N, N*i+j+1)
ax.plot(R[j,ex], c='k', lw=1.5)
ax.set_xlim(0, 360)
ax.set_ylim(-0.1*R[:,ex].max(), 1.1*R[:,ex].max())
ax.set_axis_off()
# Population responses
for j in xrange(N):
self.out('Environment %d population max = %.2f Hz'%(j+1, R[j].max()))
ax = plt.subplot(2, N, j+1+N)
ax.imshow(R[j], aspect='auto', interpolation='nearest')
array_to_image(R[j], 'pop_env_%02d.png'%(j+1), cmap=mpl.cm.gray_r)
plt.draw()
plt.rcParams['figure.figsize'] = plt.rcParamsDefault['figure.figsize']
self.out.outfd.close()
| mit |
socrteas/avito | forked_file.py | 1 | 2684 | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 27 21:21:21 2015
@author: cmarr
"""
# ad click prediction : a view from the trenches
# __author__ : Abhishek Thakur
# __credits__ : tinrtgu
from math import sqrt, exp, log
from csv import DictReader
import pandas as pd
import numpy as np
class ftrl(object):
def __init__(self, alpha, beta, l1, l2, bits):
self.z = [0.] * bits
self.n = [0.] * bits
self.alpha = alpha
self.beta = beta
self.l1 = l1
self.l2 = l2
self.w = {}
self.X = []
self.y = 0.
self.bits = bits # note: used directly as the length of the z/n accumulators (number of hash buckets)
self.Prediction = 0.
def sgn(self, x):
if x < 0:
return -1
else:
return 1
def fit(self,line):
try:
self.ID = line['ID']
del line['ID']
except:
pass
try:
self.y = float(line['IsClick'])
del line['IsClick']
except:
pass
del line['HistCTR']
self.X = [0.] * len(line)
for i, key in enumerate(line):
val = line[key]
self.X[i] = (abs(hash(key + '_' + val)) % self.bits)
self.X = [0] + self.X
def logloss(self):
act = self.y
pred = self.Prediction
predicted = max(min(pred, 1. - 10e-15), 10e-15)
return -log(predicted) if act == 1. else -log(1. - predicted)
def predict(self):
W_dot_x = 0.
w = {}
for i in self.X:
if abs(self.z[i]) <= self.l1:
w[i] = 0.
else:
w[i] = (self.sgn(self.z[i]) * self.l1 - self.z[i]) / (((self.beta + sqrt(self.n[i]))/self.alpha) + self.l2)
W_dot_x += w[i]
self.w = w
self.Prediction = 1. / (1. + exp(-max(min(W_dot_x, 35.), -35.)))
return self.Prediction
def update(self, prediction):
for i in self.X:
g = (prediction - self.y) #* i
sigma = (1./self.alpha) * (sqrt(self.n[i] + g*g) - sqrt(self.n[i]))
self.z[i] += g - sigma*self.w[i]
self.n[i] += g*g
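# --- Editor's illustrative sketch (not part of the original script) ---
# One FTRL-proximal step on a single hypothetical row; the field values
# below are made up, real rows come from trainSearchStream.tsv.
def _ftrl_single_row_demo():
    clf = ftrl(alpha=0.1, beta=1., l1=0.1, l2=1.0, bits=2 ** 20)
    row = {'SearchID': '1', 'AdID': '7', 'Position': '1',
           'ObjectType': '3', 'HistCTR': '0.01', 'IsClick': '0'}
    clf.fit(row)       # hash the categorical fields into feature indices
    p = clf.predict()  # predicted click probability (0.5 before any update)
    clf.update(p)      # FTRL-proximal update of the z and n accumulators
    return p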
if __name__ == '__main__':
"""
SearchID AdID Position ObjectType HistCTR IsClick
"""
train = '../input/trainSearchStream.tsv'
clf = ftrl(alpha = 0.1,
beta = 1.,
l1 = 0.1,
l2 = 1.0,
bits = 20)
loss = 0.
count = 0
for t, line in enumerate(DictReader(open(train), delimiter='\t')):
clf.fit(line)
pred = clf.predict()
loss += clf.logloss()
clf.update(pred)
count += 1
if count%10000 == 0:
print ("(seen, loss) : ", (count, loss * 1./count))
if count == 100000:
break
test = '../input/testSearchStream.tsv'
with open('temp.csv', 'w') as output:
for t, line in enumerate(DictReader(open(test), delimiter='\t')):
clf.fit(line)
output.write('%s\n' % str(clf.predict()))
sample = pd.read_csv('../input/sampleSubmission.csv')
preds = np.array(pd.read_csv('temp.csv', header = None))
index = sample.ID.values - 1
sample['IsClick'] = preds[index]
sample.to_csv('submission.csv', index=False)
| mit |
mehdidc/scikit-learn | examples/classification/plot_lda.py | 164 | 2224 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
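# --- Editor's illustrative check (not part of the original example) ---
# Only the first column of X carries class information; the rest is noise.
def _generate_data_shape_check():
    X_demo, y_demo = generate_data(n_samples=10, n_features=5)
    return X_demo.shape, y_demo.shape  # ((10, 5), (10,))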
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e412.py | 2 | 6395 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer']
# 'hair straighteners',
# 'television',
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
# max_input_power=100,
max_diff = 100,
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=1,
skip_probability_for_first_appliance=0,
one_target_per_seq=False,
n_seq_per_batch=64,
# subsample_target=4,
include_diff=True,
include_power=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs=True,
# standardise_input=True,
standardise_targets=True,
unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-3,
learning_rate_changes_by_iteration={
1000: 1e-4,
4000: 1e-5
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=10)
)
def exp_a(name):
# ReLU hidden layers
# linear output
# output one appliance
# 0% skip prob for first appliance
# 100% skip prob for other appliances
# input is diff
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': RecurrentLayer,
'num_units': 50,
'W_in_to_hid': Normal(std=1),
'W_hid_to_hid': Identity(scale=0.5),
'nonlinearity': rectify,
'learn_init': False,
'precompute_input': True
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=1/sqrt(50))
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
# EXPERIMENTS = list('abcdefghi')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=5000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
jsanjak/hrregression | app/utils.py | 1 | 2059 | from bokeh.plotting import figure
from bokeh.embed import components
from bokeh.models import Span, FactorRange
from bokeh.charts import Bar
import pandas as pd
import numpy as np
from math import pi
def make_histogram(data,provider_id,provider_name,readmin_rate):
provider_data=data.ix[provider_id]
hist, edges = np.histogram(data, density=True, bins=50)
p = figure()#title = "Histogram of heart return days with value for "+ provider_name + " marked in red" )
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
fill_color="#036564", line_color="#033649")
p.line([provider_data,provider_data],[0,.5],line_width=2,color="red")
p.yaxis.axis_label = None
p.xaxis.axis_label = readmin_rate
p.xaxis.axis_label_text_font_size = "20pt"
p.yaxis.major_label_text_font_size = "10pt"
p.yaxis.axis_label_text_font_size = "20pt"
return(p)
def plot_lasso(lasso_results):
data = lasso_results.sort_values(by='rank_coef',ascending=True)
yrange = data['measure_names'].tolist()
p = figure(width=800, height=600, x_range =[0,1.1*np.max(data['rank_coef'].values)], y_range=yrange)
p.rect(x=data['rank_coef']/2, y=yrange,
width=abs(data['rank_coef']), height=0.4,color=(76,114,176),
width_units="data", height_units="data")
#p = Bar(data,
# 'measure_names', values='rank_coef', legend=False,
# title="Most Actionable Factors")
#p.x_range = FactorRange(factors=data['measure_names'])
p.yaxis.major_label_orientation = pi/12
p.yaxis.axis_label = None
p.xaxis.axis_label = 'Actionability Index'
p.xaxis.axis_label_text_font_size = "20pt"
p.yaxis.major_label_text_font_size = "10pt"
p.yaxis.axis_label_text_font_size = "20pt"
return(p)
def my_z_score(x):
z=((x.values - x.mean())/x.std())
return(z)
def scale_zero_one(data):
return((data - np.min(data))/(np.max(data)-np.min(data)))
def rank_direction(coef,scaled):
if coef >0:
#delta_1 = scaled - 1
print(scaled)
rank_coef = np.abs(coef)/np.sqrt((1 - scaled))
else:
rank_coef = np.abs(coef)/np.sqrt((scaled))
return(rank_coef)
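# --- Editor's illustrative sketch (not part of the original module) ---
# rank_direction() boosts factors with room to move: a positive coefficient
# is weighted by how far the 0-1 scaled value sits below 1, a negative one
# by how far it sits above 0. Toy numbers, for illustration only:
def _rank_direction_demo():
    up = rank_direction(0.5, 0.75)     # 0.5 / sqrt(1 - 0.75) = 1.0
    down = rank_direction(-0.5, 0.25)  # 0.5 / sqrt(0.25)     = 1.0
    return up, down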
| mit |
stefanbuenten/nanodegree | p5/tools/startup.py | 9 | 1161 | #!/usr/bin/python
print
print "checking for nltk"
try:
import nltk
except ImportError:
print "you should install nltk before continuing"
print "checking for numpy"
try:
import numpy
except ImportError:
print "you should install numpy before continuing"
print "checking for scipy"
try:
import scipy
except:
print "you should install scipy before continuing"
print "checking for sklearn"
try:
import sklearn
except:
print "you should install sklearn before continuing"
print
print "downloading the Enron dataset (this may take a while)"
print "to check on progress, you can cd up one level, then execute <ls -lthr>"
print "Enron dataset should be last item on the list, along with its current size"
print "download will complete at about 423 MB"
import urllib
url = "https://www.cs.cmu.edu/~./enron/enron_mail_20150507.tgz"
urllib.urlretrieve(url, filename="../enron_mail_20150507.tgz")
print "download complete!"
print
print "unzipping Enron dataset (this may take a while)"
import tarfile
import os
os.chdir("..")
tfile = tarfile.open("enron_mail_20150507.tgz", "r:gz")
tfile.extractall(".")
print "you're ready to go!"
| mit |
sonnyhu/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
VicenteYanez/GFA | gfa/scripts/example_principalstrain.py | 1 | 1993 | #! /usr/bin/env python3
import math
import numpy as np
import pdb
import os
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
import geometry as geo
import field as field
from example_figures import tensor_figure
# load the triangular mesh
dir_path = os.path.dirname(os.path.realpath(__file__))
path_node = "{}/../example_files/malla.node".format(dir_path)
path_ele = "{}/../example_files/malla.ele".format(dir_path)
x_triangulos = np.loadtxt(path_node, usecols=[1], skiprows=1)
y_triangulos = np.loadtxt(path_node, usecols=[2], skiprows=1)
origen = [-76, -46]
# cargar los datos
dataset = '{}/../example_files/vect2007_2010'.format(dir_path)
lon_gps = np.loadtxt(dataset, usecols=[1])
lat_gps = np.loadtxt(dataset, usecols=[2])
v_n_gps = np.loadtxt(dataset, usecols=[4])
v_e_gps = np.loadtxt(dataset, usecols=[3])
# convert mm/yr to m/yr
v_e_gps = v_e_gps/1000
v_n_gps = v_n_gps/1000
# project the mesh and the GPS points
malla_proy = geo.geo2proj(x_triangulos, y_triangulos, origen[0], origen[1])
gps_proy = geo.geo2proj(lon_gps, lat_gps, origen[0], origen[1])
# interpolate onto the triangular mesh points
vy_tri = griddata(gps_proy, v_n_gps, malla_proy, method='cubic')
vx_tri = griddata(gps_proy, v_e_gps, malla_proy, method='cubic')
"""
NOTA FAKO
CON MALLA_PROY Y VY_TRI/VX_TRI PUEDES PLOTEAR LOS VECTORES
INTERPOLADOS EN LA MALLA
"""
# ############################################################################
# field() functions
# ############################################################################
gradiente = field.triangular_gradient(vx_tri, vy_tri, malla_proy[0],
malla_proy[1], path_ele)
evalue, evector = field.principal_stress(gradiente)
# ############################################################################
# Plot
# ############################################################################
fig = tensor_figure(x_triangulos, y_triangulos, evalue, evector)
# plt.show()
| gpl-3.0 |
paulmueller/radontea | examples/comparison_parallel.py | 2 | 2561 | """Comparison of parallel-beam reconstruction methods
This example illustrates the performance of the
different reconstruction techniques for a parallel-beam
geometry. The left column shows the reconstruction of
the original image and the right column shows the reconstruction
of the corresponding binary images. Note that the
SART process could be sped-up by computing an
initial guess with a non-iterative method and
setting it with the ``initial`` keyword argument.
"""
from matplotlib import pylab as plt
import numpy as np
import radontea
from radontea.logo import get_original
N = 55 # image size
A = 13 # number of sinogram angles
ITA = 10 # number of iterations a
ITB = 100 # number of iterations b
angles = np.linspace(0, np.pi, A)
im = get_original(N)
sino = radontea.radon_parallel(im, angles)
fbp = radontea.backproject(sino, angles)
fintp = radontea.fourier_map(sino, angles).real
sarta = radontea.sart(sino, angles, iterations=ITA)
sartb = radontea.sart(sino, angles, iterations=ITB)
im2 = (im >= (im.max() / 5)) * 255
sino2 = radontea.radon_parallel(im2, angles)
fbp2 = radontea.backproject(sino2, angles)
fintp2 = radontea.fourier_map(sino2, angles).real
sarta2 = radontea.sart(sino2, angles, iterations=ITA)
sartb2 = radontea.sart(sino2, angles, iterations=ITB)
plt.figure(figsize=(8, 22))
pltkw = {"vmin": -20,
"vmax": 280}
plt.subplot(6, 2, 1, title="original image")
plt.imshow(im, **pltkw)
plt.axis('off')
plt.subplot(6, 2, 2, title="binary image")
plt.imshow(im2, **pltkw)
plt.axis('off')
plt.subplot(6, 2, 3, title="sinogram (from original)")
plt.imshow(sino)
plt.axis('off')
plt.subplot(6, 2, 4, title="sinogram (from binary)")
plt.imshow(sino2)
plt.axis('off')
plt.subplot(6, 2, 5, title="filtered backprojection")
plt.imshow(fbp, **pltkw)
plt.axis('off')
plt.subplot(6, 2, 6, title="filtered backprojection")
plt.imshow(fbp2, **pltkw)
plt.axis('off')
plt.subplot(6, 2, 7, title="Fourier interpolation")
plt.imshow(fintp, **pltkw)
plt.axis('off')
plt.subplot(6, 2, 8, title="Fourier interpolation")
plt.imshow(fintp2, **pltkw)
plt.axis('off')
plt.subplot(6, 2, 9, title="SART ({} iterations)".format(ITA))
plt.imshow(sarta, **pltkw)
plt.axis('off')
plt.subplot(6, 2, 10, title="SART ({} iterations)".format(ITA))
plt.imshow(sarta2, **pltkw)
plt.axis('off')
plt.subplot(6, 2, 11, title="SART ({} iterations)".format(ITB))
plt.imshow(sartb, **pltkw)
plt.axis('off')
plt.subplot(6, 2, 12, title="SART ({} iterations)".format(ITB))
plt.imshow(sartb2, **pltkw)
plt.axis('off')
plt.tight_layout()
plt.show()
| bsd-3-clause |
pbrod/scipy | doc/source/tutorial/examples/normdiscr_plot2.py | 36 | 1641 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints // 2
npointsf = float(npoints)
nbound = 4 # bounds for the truncated normal
normbound = (1 + 1/npointsf) * nbound # actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2,1) # integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound # bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) # fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
f, l = np.histogram(rvs,bins=gridlimits)
sfreq = np.vstack([gridint,f,probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
# cumulative relative frequencies (overwrite the per-bin values above) for the CDF plot
fs = sfreq[:,1].cumsum() / float(n_sample)
ft = sfreq[:,2].cumsum() / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5, scale=nd_std),
color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
samkreter/fuzzyLilly | extention.py | 1 | 3988 | from memfuncs import MemFunc
import matplotlib.pyplot as plt
import numpy as np
from file import File
w = 2
INC = .1
class ExtentionOps:
def __init__(self,op):
opDict = {'add':np.add,
'sub':np.subtract,
'div':np.divide,
'mul':np.multiply,
'max':max,
'min':min,
'pow':pow}
self.opName = op
# Workaround for the complement, which is unary and handled separately
if op == "comp":
self.func = self.comp
elif op in opDict:
self.op = opDict[op]
self.func = self.extention
else:
raise ValueError("Invalid Operator")
#raise ValueError("Do not have that operator in AlphaOps")
def convertToDomain(self,A):
mem1 = MemFunc("trap",A)
newA = []
for i in np.arange(0,1.05,.05):
newA.append([i,self.round2(mem1.memFunc(i))])
return newA
def round2(self,val):
val = int(val * 100)
return (val / 100)
def round_to_05(self,n):
correction = 0.5 if n >= 0 else -0.5
return int( n/.05+correction ) * .05
def comp(self,A):
A = A[0]
if len(A) == 4:
A = self.convertToDomain(A)
out = [[],[]]
for a in A:
z = self.round_to_05(self.round2(1.0 - a[0]))
f = a[1]
try:
index = out[0].index(z)
out[1][index] = max(out[1][index],f)
except ValueError:
out[0].append(z)
out[1].append(f)
out = list(zip(out[0],out[1]))
out.sort(key=lambda x:x[0])
out1 = list(zip(*out))
A = np.array(A)
#[i[0] for i in sorted(enumerate(myList), key=lambda x:x[1])]
plt.title("Compliment")
plt.plot(out1[0],out1[1],c='y',linewidth=2)
plt.plot(A[:,0],A[:,1],c='k',linewidth=2)
plt.xlim([0,1])
plt.ylim([0,1])
plt.show()
print(out)
return out
def extention(self, params):
A = params[0]
for i in range(1,len(params)):
B = params[i]
#Convert a membership function to the right domain the first time
if len(A) == 4:
A = self.convertToDomain(A)
if len(B) == 4:
B = self.convertToDomain(B)
# print("A:",A)
# print("B:",B)
out = [[],[]]
for a in A:
for b in B:
z = self.round2(self.op(a[0], b[0]))
try:
b[1]
except:
continue
f = min(a[1],b[1])
try:
index = out[0].index(z)
out[1][index] = max(out[1][index],f)
except ValueError:
out[0].append(z)
out[1].append(f)
out = list(zip(out[0],out[1]))
out.sort(key=lambda x:x[0])
B = np.array(B)
A = np.array(A)
out1 = np.array(list(zip(*out)))
plt.plot(A[:,0],A[:,1],c='b',linewidth=2)
plt.plot(B[:,0],B[:,1],c='g',linewidth=2)
plt.plot(out1[0],out1[1],c='y',linewidth=2)
plt.xlim([0,1])
plt.ylim([0,1])
plt.title(self.opName)
plt.show()
A = out
return A
# e = ExtentionOps("add")
# mem1 = MemFunc('tri',[.2,.2,.4])
# mem2 = MemFunc('tri',[.4,.6,.8])
# #mem2 = lambda x: 1 if x == 1 else 0
# A = []
# B = []
# for i in np.arange(0,1,.05):
# A.append([i,e.round2(mem1.memFunc(i))])
# B.append([i,e.round2(mem2.memFunc(i))])
# A = np.array(A)
# B = np.array(B)
# #A = [.2,.4,.4,.6]
# # B = [.4,.6,.6,.8]
# print("########")
# #p = e.comp(A)
# t = e.extention([A,B])
# #print(e.extention([p,t]))
| mit |
KarlTDebiec/Moldynplot | moldynplot/dataset/ChemicalShiftSequenceDataset.py | 2 | 13642 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.dataset.ChemicalShiftDataset.py
#
# Copyright (C) 2015-2017 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Represents NMR chemical shift data
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
if __name__ == "__main__":
__package__ = str("moldynplot.dataset")
import moldynplot.dataset
from IPython import embed
import numpy as np
import pandas as pd
from .SequenceDataset import SequenceDataset
from ..myplotspec import sformat, wiprint
################################### CLASSES ###################################
class ChemicalShiftDataset(SequenceDataset):
"""
    Represents NMR chemical shift data
"""
@staticmethod
def construct_argparser(parser_or_subparsers=None, **kwargs):
"""
Adds arguments to an existing argument parser, constructs a
subparser, or constructs a new parser
Arguments:
parser_or_subparsers (ArgumentParser, _SubParsersAction,
optional): If ArgumentParser, existing parser to which
arguments will be added; if _SubParsersAction, collection of
subparsers to which a new argument parser will be added; if
None, a new argument parser will be generated
kwargs (dict): Additional keyword arguments
Returns:
ArgumentParser: Argument parser or subparser
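        Example (an illustrative sketch; assumes standalone use of the
        returned parser rather than attachment to a subparser collection):
            >>> parser = ChemicalShiftDataset.construct_argparser()
            >>> parser.print_help()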
"""
import argparse
# Process arguments
help_message = """Process NMR chemical shift data"""
if isinstance(parser_or_subparsers, argparse.ArgumentParser):
parser = parser_or_subparsers
elif isinstance(parser_or_subparsers, argparse._SubParsersAction):
parser = parser_or_subparsers.add_parser(name="chemical_shift",
description=help_message, help=help_message)
elif parser_or_subparsers is None:
parser = argparse.ArgumentParser(description=help_message)
# Defaults
if parser.get_default("cls") is None:
parser.set_defaults(cls=ChemicalShiftDataset)
# Arguments unique to this class
arg_groups = {ag.title: ag for ag in parser._action_groups}
# Input arguments
input_group = arg_groups.get("input",
parser.add_argument_group("input"))
try:
input_group.add_argument("-delays", dest="delays", metavar="DELAY",
nargs="+", type=float, help="""delays for each infile,
if infiles represent a series; number of delays must match
number of infiles""")
except argparse.ArgumentError:
pass
# Action arguments
action_group = arg_groups.get("action",
parser.add_argument_group("action"))
try:
action_group.add_argument("-relax", dest="calc_relax", type=str,
nargs="?", default=None, const="r1", help="""Calculate
relaxation rates and standard errors; may additionally
specify type of relaxation being measured (e.g. r1, r2)""")
except argparse.ArgumentError:
pass
# Arguments inherited from superclass
SequenceDataset.construct_argparser(parser)
return parser
def __init__(self, delays=None, calc_relax=False, calc_pdist=False,
outfile=None, interactive=False, **kwargs):
"""
Arguments:
infile{s} (list): Path(s) to input file(s); may contain
environment variables and wildcards
delays (list): Delays corresponding to series of infiles; used to
name columns of merged sequence DataFrame
use_indexes (list): Residue indexes to select from DataFrame,
once DataFrame has already been loaded
calc_pdist (bool): Calculate probability distribution
pdist_kw (dict): Keyword arguments used to configure
probability distribution calculation
dataset_cache (dict): Cache of previously-loaded Datasets
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
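        Example (an illustrative sketch; the file names, delays, and output
        path below are assumed values, not taken from the original source):
            >>> ChemicalShiftDataset(
            ...     infiles=["peaks_10ms.txt", "peaks_50ms.txt"],
            ...     delays=[10.0, 50.0], calc_relax="r1", outfile="relax.csv")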
"""
# Process arguments
verbose = kwargs.get("verbose", 1)
self.dataset_cache = kwargs.get("dataset_cache", None)
# Read data
if delays is not None:
kwargs["infile_column_prefixes"] = ["{0:3.1f} ms".format(delay) for
delay in delays]
self.sequence_df = self.read(**kwargs)
# Cut data
if "use_indexes" in kwargs:
use_indexes = np.array(kwargs.pop("use_indexes"))
res_index = np.array(
[int(i.split(":")[1]) for i in self.sequence_df.index.values])
self.sequence_df = self.sequence_df[
np.in1d(res_index, use_indexes)]
# Calculate relaxation
if calc_relax:
relax_kw = kwargs.pop("relax_kw", {})
relax_kw["kind"] = calc_relax
self.sequence_df = self.calc_relax(df=self.sequence_df,
relax_kw=relax_kw, **kwargs)
# Calculate probability distribution
if calc_pdist:
self.pdist_df = self.calc_pdist(df=self.sequence_df, **kwargs)
# Output data
if verbose >= 2:
print("Processed sequence DataFrame:")
print(self.sequence_df)
if calc_pdist:
print("Processed pdist DataFrame:")
print(self.pdist_df)
# Write data
if outfile is not None:
self.write(df=self.sequence_df, outfile=outfile, **kwargs)
# Interactive prompt
if interactive:
embed()
def read(self, **kwargs):
"""
Reads sequence from one or more *infiles* into a DataFrame.
Extends :class:`Dataset<myplotspec.Dataset.Dataset>` with
option to read in residue indexes.
"""
from os import devnull
import re
from subprocess import Popen, PIPE
from ..myplotspec import multi_pop_merged
# Functions
def convert_name(name):
return "{0}:{1}".format(name[-4:-1].upper(), name[2:-4])
# Process arguments
infile_args = multi_pop_merged(["infile", "infiles"], kwargs)
infiles = self.infiles = self.process_infiles(infiles=infile_args)
if len(infiles) == 0:
raise Exception(sformat("""No infiles found matching
'{0}'""".format(infile_args)))
re_h5 = re.compile(
r"^(?P<path>(.+)\.(h5|hdf5))((:)?(/)?(?P<address>.+))?$",
flags=re.UNICODE)
infile_column_prefixes = kwargs.get("infile_column_prefixes",
range(len(infiles)))
# Load Data
dfs = []
for infile in infiles:
if re_h5.match(infile):
df = self._read_hdf5(infile, **kwargs)
else:
with open(devnull, "w") as fnull:
header = " ".join(
Popen("head -n 1 {0}".format(infile), stdout=PIPE,
stderr=fnull, shell=True).stdout.read().strip().split(
"\t"))
ccpnmr_header = sformat("""Number # Position F1 Position F2
Assign F1 Assign F2 Height Volume Line Width F1 (Hz) Line
Width F2 (Hz) Merit Details Fit Method Vol. Method""")
if (header == ccpnmr_header):
read_csv_kw = dict(index_col=None, delimiter="\t",
dtype={"Position F1": np.float32,
"Position F2": np.float32, "Assign F1": np.str,
"Height": np.float32, "Volume": np.float32},
usecols=[str("Position F1"), str("Position F2"),
str("Assign F1"), str("Height"), str("Volume")],
converters={"Assign F1": convert_name})
read_csv_kw.update(kwargs.get("read_csv_kw", {}))
kwargs["read_csv_kw"] = read_csv_kw
df = self._read_text(infile, **kwargs)
df.columns = ["1H", "15N", "residue", "height", "volume"]
df.set_index("residue", inplace=True)
else:
df = self._read_text(infile, **kwargs)
dfs.append(df)
if len(dfs) == 1:
df = dfs[0]
else:
df = dfs[0][["1H", "15N"]]
if len(dfs) != len(infile_column_prefixes):
raise Exception(sformat("""Numb of infile column prefixes
must match number of provided infiles"""))
for df_i, prefix_i in zip(dfs, infile_column_prefixes):
df["{0} height".format(prefix_i)] = df_i["height"]
df["{0} volume".format(prefix_i)] = df_i["volume"]
self.dfs = dfs
# Sort
if df.index.name == "residue":
df = df.loc[
sorted(df.index.values, key=lambda x: int(x.split(":")[1]))]
else:
df = df.loc[sorted(df.index.values)]
return df
def calc_relax(self, **kwargs):
"""
Calculates relaxation rates.
Arguments:
df (DataFrame): DataFrame; probability distribution will be
calculated for each column using rows as data points
relax_kw (dict): Keyword arguments used to configure
relaxation rate calculation
relax_kw[kind] (str): Kind of relaxation rate being
calculated; will be used to name column
relax_kw[intensity_method] (str): Metric to use for peak
              intensity; may be 'height' (default) or 'volume'
relax_kw[error_method] (str): Metric to use for error
calculation; may be 'rmse' for root-mean-square error
(default) or 'mae' for mean absolute error
relax_kw[n_synth_datasets] (int): Number of synthetic datasets
to use for error calculation
Returns:
DataFrame: Sequence DataFrame with additional columns for
relaxation rate and standard error
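        Example (an illustrative sketch; the keyword values and the
        ``dataset`` object are assumed):
            >>> relax_kw = {"kind": "r2", "intensity_method": "height",
            ...             "error_method": "mae", "n_synth_datasets": 100}
            >>> fit_df = dataset.calc_relax(df=dataset.sequence_df,
            ...                             relax_kw=relax_kw)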
"""
import re
from scipy.optimize import curve_fit
# Process arguments
verbose = kwargs.get("verbose", 1)
df = kwargs.get("df")
if df is None:
if hasattr(self, "sequence_df"):
df = self.sequence_df
else:
                raise ValueError("No DataFrame provided and no "
                  "'sequence_df' attribute is available")
relax_kw = kwargs.get("relax_kw", {})
kind = relax_kw.get("kind", "r1")
intensity_method = relax_kw.get("intensity_method", "height")
error_method = relax_kw.get("error_method", "mae")
n_synth_datasets = relax_kw.get("n_synth_datasets", 1000)
# Calculate relaxation rates
re_column = re.compile(
"^(?P<delay>\d+\.?\d*?) ms {0}".format(intensity_method))
columns = [c for c in df.columns.values if re_column.match(c)]
delays = np.array(
[re.match(re_column, c).groupdict()["delay"] for c in columns],
np.float) / 1000
def calc_relax_rate(residue, **kwargs):
"""
"""
from .. import multiprocess_map
if verbose >= 1:
wiprint(
"""Calculating {0} relaxation rate for {1}""".format(kind,
residue.name))
def model_function(delay, intensity, relaxation):
return intensity * np.exp(-1 * delay * relaxation)
I = np.array(residue.filter(columns, np.float64))
I0, R = curve_fit(model_function, delays, I, p0=(I[0], 1.0))[0]
# Calculate error
if error_method == "rmse":
error = np.sqrt(
np.mean((I - model_function(delays, I0, R)) ** 2))
elif error_method == "mae":
error = np.mean(
np.sqrt((I - model_function(delays, I0, R)) ** 2))
# Construct synthetic relaxation profiles
synth_datasets = np.zeros((n_synth_datasets, I.size))
for i, I_mean in enumerate(model_function(delays, I0, R)):
synth_datasets[:, i] = np.random.normal(I_mean, error,
n_synth_datasets)
def synth_fit_decay(synth_intensity):
try:
synth_I0, synth_R = \
curve_fit(model_function, delays, synth_intensity,
p0=(I0, R))[0]
return synth_I0, synth_R
except RuntimeError:
if verbose >= 1:
wiprint("""Unable to calculate standard error for {0}
""".format(residue.name))
return (np.nan, np.nan)
# Calculate standard error
synth_I0_Rs = np.array(multiprocess_map(synth_fit_decay,
synth_datasets, 16))
I0_se = np.std(synth_I0_Rs[:,0])
R_se = np.std(synth_I0_Rs[:,1])
return pd.Series([I0, I0_se, R, R_se])
# Calculate relaxation rates and standard errors
fit = df.apply(calc_relax_rate, axis=1)
# Format and return
fit.columns = ["I0", "I0 se", kind, kind + " se"]
df = df.join(fit)
return df
#################################### MAIN #####################################
if __name__ == "__main__":
ChemicalShiftDataset.main() | bsd-3-clause |
e-koch/spectral-cube | spectral_cube/lower_dimensional_structures.py | 3 | 40372 | from __future__ import print_function, absolute_import, division
import warnings
import numpy as np
from numpy.ma.core import nomask
import dask.array as da
from astropy import convolution
from astropy import units as u
from astropy import wcs
#from astropy import log
from astropy.io.fits import Header, HDUList, PrimaryHDU, BinTableHDU, FITS_rec
from radio_beam import Beam, Beams
from astropy.io.registry import UnifiedReadWriteMethod
from . import spectral_axis
from .io.core import LowerDimensionalObjectWrite
from .utils import SliceWarning, BeamWarning, SmoothingWarning, FITSWarning
from . import cube_utils
from . import wcs_utils
from .masks import BooleanArrayMask, MaskBase
from .base_class import (BaseNDClass, SpectralAxisMixinClass,
SpatialCoordMixinClass, MaskableArrayMixinClass,
MultiBeamMixinClass, BeamMixinClass,
HeaderMixinClass
)
__all__ = ['LowerDimensionalObject', 'Projection', 'Slice', 'OneDSpectrum']
class LowerDimensionalObject(u.Quantity, BaseNDClass, HeaderMixinClass):
"""
Generic class for 1D and 2D objects.
"""
@property
def hdu(self):
if self.wcs is None:
hdu = PrimaryHDU(self.value)
else:
hdu = PrimaryHDU(self.value, header=self.header)
hdu.header['BUNIT'] = self.unit.to_string(format='fits')
if 'beam' in self.meta:
hdu.header.update(self.meta['beam'].to_header_keywords())
return hdu
def read(self, *args, **kwargs):
raise NotImplementedError()
write = UnifiedReadWriteMethod(LowerDimensionalObjectWrite)
def __getslice__(self, start, end, increment=None):
# I don't know why this is needed, but apparently one of the inherited
# classes implements getslice, which forces us to overwrite it
# I can't find any examples where __getslice__ is actually implemented,
# though, so this seems like a deep and frightening bug.
#log.debug("Getting a slice from {0} to {1}".format(start,end))
return self.__getitem__(slice(start, end, increment))
def __getitem__(self, key, **kwargs):
"""
Return a new `~spectral_cube.lower_dimensional_structures.LowerDimensionalObject` of the same class while keeping
other properties fixed.
"""
new_qty = super(LowerDimensionalObject, self).__getitem__(key)
if new_qty.ndim < 2:
# do not return a projection
return u.Quantity(new_qty)
if self._wcs is not None:
if ((isinstance(key, tuple) and
any(isinstance(k, slice) for k in key) and
len(key) > self.ndim)):
# Example cases include: indexing tricks like [:,:,None]
warnings.warn("Slice {0} cannot be used on this {1}-dimensional"
" array's WCS. If this is intentional, you "
" should use this {2}'s ``array`` or ``quantity``"
" attribute."
.format(key, self.ndim, type(self)),
SliceWarning
)
return self.quantity[key]
else:
newwcs = self._wcs[key]
else:
newwcs = None
new = self.__class__(value=new_qty.value,
unit=new_qty.unit,
copy=False,
wcs=newwcs,
meta=self._meta,
mask=(self._mask[key] if self._mask is not nomask
else None),
header=self._header,
**kwargs)
new._wcs = newwcs
new._meta = self._meta
new._mask=(self._mask[key] if self._mask is not nomask else nomask)
new._header = self._header
return new
def __array_finalize__(self, obj):
self._wcs = getattr(obj, '_wcs', None)
self._meta = getattr(obj, '_meta', None)
self._mask = getattr(obj, '_mask', None)
self._header = getattr(obj, '_header', None)
self._spectral_unit = getattr(obj, '_spectral_unit', None)
self._fill_value = getattr(obj, '_fill_value', np.nan)
self._wcs_tolerance = getattr(obj, '_wcs_tolerance', 0.0)
if isinstance(obj, VaryingResolutionOneDSpectrum):
self._beams = getattr(obj, '_beams', None)
else:
self._beam = getattr(obj, '_beam', None)
super(LowerDimensionalObject, self).__array_finalize__(obj)
@property
def __array_priority__(self):
return super(LowerDimensionalObject, self).__array_priority__*2
@property
def array(self):
"""
Get a pure array representation of the LDO. Useful when multiplying
and using numpy indexing tricks.
"""
return np.asarray(self)
@property
def _data(self):
# the _data property is required by several other mixins
# (which probably means defining it here is a bad design)
return self.array
@property
def quantity(self):
"""
Get a pure `~astropy.units.Quantity` representation of the LDO.
"""
return u.Quantity(self)
def to(self, unit, equivalencies=[], freq=None):
"""
Return a new `~spectral_cube.lower_dimensional_structures.Projection`
of the same class with the specified unit.
See `astropy.units.Quantity.to` for further details.
"""
if not isinstance(unit, u.Unit):
unit = u.Unit(unit)
if unit == self.unit:
# No copying
return self
if hasattr(self, 'with_spectral_unit'):
freq = self.with_spectral_unit(u.Hz).spectral_axis
if freq is None and 'RESTFRQ' in self.header:
freq = self.header['RESTFRQ'] * u.Hz
# Create the tuple of unit conversions needed.
factor = cube_utils.bunit_converters(self, unit, equivalencies=equivalencies,
freq=freq)
converted_array = (self.quantity * factor).value
# use private versions of variables, not the generated property
# versions
# Not entirely sure the use of __class__ here is kosher, but we do want
# self.__class__, not super()
new = self.__class__(value=converted_array, unit=unit, copy=True,
wcs=self._wcs, meta=self._meta, mask=self._mask,
header=self._header)
return new
@property
def _mask(self):
""" Annoying hack to deal with np.ma.core.is_mask failures (I don't
like using __ but I think it's necessary here)"""
if self.__mask is None:
# need this to be *exactly* the numpy boolean False
return nomask
return self.__mask
@_mask.setter
def _mask(self, value):
self.__mask = value
def shrink_mask(self):
"""
Copy of the numpy masked_array shrink_mask method. This is essentially
a hack needed for matplotlib to show images.
"""
m = self._mask
if m.ndim and not m.any():
self._mask = nomask
return self
def _initial_set_mask(self, mask):
"""
Helper tool to validate mask when originally setting it in __new__
Note that because this is intended to be used in __new__, order
matters: ``self`` must have ``_wcs``, for example.
"""
if mask is None:
mask = BooleanArrayMask(np.ones_like(self.value, dtype=bool),
self._wcs, shape=self.value.shape)
elif isinstance(mask, np.ndarray):
if mask.shape != self.value.shape:
raise ValueError("Mask shape must match the {0} shape."
.format(self.__class__.__name__)
)
mask = BooleanArrayMask(mask, self._wcs, shape=self.value.shape)
elif isinstance(mask, MaskBase):
pass
else:
raise TypeError("mask of type {} is not a supported mask "
"type.".format(type(mask)))
# Validate the mask before setting
mask._validate_wcs(new_data=self.value, new_wcs=self._wcs,
wcs_tolerance=self._wcs_tolerance)
self._mask = mask
class Projection(LowerDimensionalObject, SpatialCoordMixinClass,
MaskableArrayMixinClass, BeamMixinClass):
def __new__(cls, value, unit=None, dtype=None, copy=True, wcs=None,
meta=None, mask=None, header=None, beam=None,
fill_value=np.nan, read_beam=False, wcs_tolerance=0.0):
if np.asarray(value).ndim != 2:
raise ValueError("value should be a 2-d array")
if wcs is not None and wcs.wcs.naxis != 2:
raise ValueError("wcs should have two dimension")
self = u.Quantity.__new__(cls, value, unit=unit, dtype=dtype,
copy=copy).view(cls)
self._wcs = wcs
self._meta = {} if meta is None else meta
self._wcs_tolerance = wcs_tolerance
self._initial_set_mask(mask)
self._fill_value = fill_value
if header is not None:
self._header = header
else:
self._header = Header()
if beam is None:
if "beam" in self.meta:
beam = self.meta['beam']
elif read_beam:
beam = cube_utils.try_load_beam(header)
if beam is None:
warnings.warn("Cannot load beam from header.",
BeamWarning
)
if beam is not None:
self.beam = beam
self.meta['beam'] = beam
# TODO: Enable header updating when non-celestial slices are
# properly handled in the WCS object.
# self._header.update(beam.to_header_keywords())
self._cache = {}
return self
def with_beam(self, beam):
'''
Attach a new beam object to the Projection.
Parameters
----------
beam : `~radio_beam.Beam`
A new beam object.
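        Example (an illustrative sketch; ``proj`` and the 15 arcsec beam
        are assumed values, not part of the original source):
            >>> from radio_beam import Beam
            >>> proj_with_beam = proj.with_beam(Beam(major=15*u.arcsec))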
'''
meta = self.meta.copy()
meta['beam'] = beam
return self._new_projection_with(beam=beam, meta=meta)
def with_fill_value(self, fill_value):
"""
Create a new :class:`Projection` or :class:`Slice` with a different
``fill_value``.
"""
return self._new_projection_with(fill_value=fill_value)
@property
def _new_thing_with(self):
return self._new_projection_with
def _new_projection_with(self, data=None, wcs=None, mask=None, meta=None,
fill_value=None, spectral_unit=None, unit=None,
header=None, wcs_tolerance=None, beam=None,
**kwargs):
data = self._data if data is None else data
if unit is None and hasattr(data, 'unit'):
if data.unit != self.unit:
raise u.UnitsError("New data unit '{0}' does not"
" match unit '{1}'. You can"
" override this by specifying the"
" `unit` keyword."
.format(data.unit, self.unit))
unit = data.unit
elif unit is None:
unit = self.unit
elif unit is not None:
# convert string units to Units
if not isinstance(unit, u.Unit):
unit = u.Unit(unit)
if hasattr(data, 'unit'):
if u.Unit(unit) != data.unit:
raise u.UnitsError("The specified new cube unit '{0}' "
"does not match the input unit '{1}'."
.format(unit, data.unit))
else:
data = u.Quantity(data, unit=unit, copy=False)
wcs = self._wcs if wcs is None else wcs
mask = self._mask if mask is None else mask
if meta is None:
meta = {}
meta.update(self._meta)
if unit is not None:
meta['BUNIT'] = unit.to_string(format='FITS')
fill_value = self._fill_value if fill_value is None else fill_value
if beam is None:
if hasattr(self, 'beam'):
beam = self.beam
newproj = self.__class__(value=data, wcs=wcs, mask=mask, meta=meta,
unit=unit, fill_value=fill_value,
header=header or self._header,
wcs_tolerance=wcs_tolerance or self._wcs_tolerance,
beam=beam,
**kwargs)
return newproj
@staticmethod
def from_hdu(hdu):
'''
Return a projection from a FITS HDU.
'''
if isinstance(hdu, HDUList):
hdul = hdu
hdu = hdul[0]
if not len(hdu.data.shape) == 2:
raise ValueError("HDU must contain two-dimensional data.")
meta = {}
mywcs = wcs.WCS(hdu.header)
if "BUNIT" in hdu.header:
unit = cube_utils.convert_bunit(hdu.header["BUNIT"])
meta["BUNIT"] = hdu.header["BUNIT"]
else:
unit = None
beam = cube_utils.try_load_beam(hdu.header)
self = Projection(hdu.data, unit=unit, wcs=mywcs, meta=meta,
header=hdu.header, beam=beam)
return self
def quicklook(self, filename=None, use_aplpy=True, aplpy_kwargs={}):
"""
Use `APLpy <https://pypi.python.org/pypi/APLpy>`_ to make a quick-look
image of the projection. This will make the ``FITSFigure`` attribute
available.
If there are unmatched celestial axes, this will instead show an image
without axis labels.
Parameters
----------
        filename : str or None
Optional - the filename to save the quicklook to.
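        Example (an illustrative sketch; the output filename is an assumed
        value):
            >>> proj.quicklook(filename="quicklook.png", use_aplpy=False)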
"""
if use_aplpy:
try:
if not hasattr(self, 'FITSFigure'):
import aplpy
self.FITSFigure = aplpy.FITSFigure(self.hdu,
**aplpy_kwargs)
self.FITSFigure.show_grayscale()
self.FITSFigure.add_colorbar()
if filename is not None:
self.FITSFigure.save(filename)
except (wcs.InconsistentAxisTypesError, ImportError):
self._quicklook_mpl(filename=filename)
else:
self._quicklook_mpl(filename=filename)
def _quicklook_mpl(self, filename=None):
from matplotlib import pyplot
self.figure = pyplot.gcf()
self.image = pyplot.imshow(self.value)
if filename is not None:
self.figure.savefig(filename)
def convolve_to(self, beam, convolve=convolution.convolve_fft,
**kwargs):
"""
Convolve the image to a specified beam.
Parameters
----------
beam : `radio_beam.Beam`
The beam to convolve to
convolve : function
The astropy convolution function to use, either
`astropy.convolution.convolve` or
`astropy.convolution.convolve_fft`
Returns
-------
proj : `Projection`
A Projection convolved to the given ``beam`` object.
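        Example (an illustrative sketch; ``proj`` and the 30 arcsec target
        beam are assumed, and the target must be larger than the current
        beam):
            >>> from radio_beam import Beam
            >>> target = Beam(major=30*u.arcsec, minor=30*u.arcsec, pa=0*u.deg)
            >>> smoothed = proj.convolve_to(target)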
"""
self._raise_wcs_no_celestial()
if not hasattr(self, 'beam'):
raise ValueError("No beam is contained in Projection.meta.")
# Check if the beams are the same.
if beam == self.beam:
warnings.warn("The given beam is identical to the current beam. "
"Skipping convolution.")
return self
pixscale = wcs.utils.proj_plane_pixel_area(self.wcs.celestial)**0.5 * u.deg
convolution_kernel = \
beam.deconvolve(self.beam).as_kernel(pixscale)
newdata = convolve(self.value, convolution_kernel,
normalize_kernel=True,
**kwargs)
self = Projection(newdata, unit=self.unit, wcs=self.wcs,
meta=self.meta, header=self.header,
beam=beam)
return self
def reproject(self, header, order='bilinear'):
"""
Reproject the image into a new header.
Parameters
----------
header : `astropy.io.fits.Header`
A header specifying a cube in valid WCS
order : int or str, optional
The order of the interpolation (if ``mode`` is set to
``'interpolation'``). This can be either one of the following
strings:
* 'nearest-neighbor'
* 'bilinear'
* 'biquadratic'
* 'bicubic'
or an integer. A value of ``0`` indicates nearest neighbor
interpolation.
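        Example (an illustrative sketch; assumes ``new_header`` is a valid
        two-dimensional celestial FITS header):
            >>> reprojected = proj.reproject(new_header, order='bilinear')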
"""
self._raise_wcs_no_celestial()
try:
from reproject.version import version
except ImportError:
raise ImportError("Requires the reproject package to be"
" installed.")
# Need version > 0.2 to work with cubes
from distutils.version import LooseVersion
if LooseVersion(version) < "0.3":
raise Warning("Requires version >=0.3 of reproject. The current "
"version is: {}".format(version))
from reproject import reproject_interp
# TODO: Find the minimal footprint that contains the header and only reproject that
# (see FITS_tools.regrid_cube for a guide on how to do this)
newwcs = wcs.WCS(header)
shape_out = [header['NAXIS{0}'.format(i + 1)] for i in range(header['NAXIS'])][::-1]
newproj, newproj_valid = reproject_interp((self.value,
self.header),
newwcs,
shape_out=shape_out,
order=order)
self = Projection(newproj, unit=self.unit, wcs=newwcs,
meta=self.meta, header=header,
read_beam=True)
return self
def subimage(self, xlo='min', xhi='max', ylo='min', yhi='max'):
"""
Extract a region spatially.
When spatial WCS dimensions are given as an `~astropy.units.Quantity`,
the spatial coordinates of the 'lo' and 'hi' corners are solved together.
This minimizes WCS variations due to the sky curvature when slicing from
a large (>1 deg) image.
Parameters
----------
[xy]lo/[xy]hi : int or `astropy.units.Quantity` or `min`/`max`
The endpoints to extract. If given as a quantity, will be
interpreted as World coordinates. If given as a string or
int, will be interpreted as pixel coordinates.
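        Example (an illustrative sketch; the pixel limits are assumed
        values):
            >>> cutout = proj.subimage(xlo=10, xhi=50, ylo=10, yhi=50)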
"""
self._raise_wcs_no_celestial()
# Solve for the spatial pixel indices together
limit_dict = wcs_utils.find_spatial_pixel_index(self, xlo, xhi, ylo, yhi)
slices = [slice(limit_dict[xx + 'lo'], limit_dict[xx + 'hi'])
for xx in 'yx']
return self[tuple(slices)]
def to(self, unit, equivalencies=[], freq=None):
"""
Return a new `~spectral_cube.lower_dimensional_structures.Projection`
of the same class with the specified unit.
See `astropy.units.Quantity.to` for further details.
"""
return super(Projection, self).to(unit, equivalencies, freq)
# A slice is just like a projection in every way
class Slice(Projection):
pass
class BaseOneDSpectrum(LowerDimensionalObject, MaskableArrayMixinClass,
SpectralAxisMixinClass):
"""
Properties shared between OneDSpectrum and VaryingResolutionOneDSpectrum.
"""
def __new__(cls, value, unit=None, dtype=None, copy=True, wcs=None,
meta=None, mask=None, header=None, spectral_unit=None,
fill_value=np.nan, wcs_tolerance=0.0):
#log.debug("Creating a OneDSpectrum with __new__")
if np.asarray(value).ndim != 1:
raise ValueError("value should be a 1-d array")
if wcs is not None and wcs.wcs.naxis != 1:
raise ValueError("wcs should have two dimension")
self = u.Quantity.__new__(cls, value, unit=unit, dtype=dtype,
copy=copy).view(cls)
self._wcs = wcs
self._meta = {} if meta is None else meta
self._wcs_tolerance = wcs_tolerance
self._initial_set_mask(mask)
self._fill_value = fill_value
if header is not None:
self._header = header
else:
self._header = Header()
self._spectral_unit = spectral_unit
if spectral_unit is None:
if 'CUNIT1' in self._header:
self._spectral_unit = u.Unit(self._header['CUNIT1'])
elif self._wcs is not None:
self._spectral_unit = u.Unit(self._wcs.wcs.cunit[0])
return self
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
arrstr = np.array2string(self.filled_data[:].value, separator=',',
prefix=prefixstr)
return '{0}{1}{2:s}>'.format(prefixstr, arrstr, self._unitstr)
@staticmethod
def from_hdu(hdu):
'''
Return a OneDSpectrum from a FITS HDU or HDU list.
'''
if isinstance(hdu, HDUList):
hdul = hdu
hdu = hdul[0]
else:
hdul = HDUList([hdu])
if not len(hdu.data.shape) == 1:
raise ValueError("HDU must contain one-dimensional data.")
meta = {}
mywcs = wcs.WCS(hdu.header)
if "BUNIT" in hdu.header:
unit = cube_utils.convert_bunit(hdu.header["BUNIT"])
meta["BUNIT"] = hdu.header["BUNIT"]
else:
unit = None
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=FITSWarning)
beam = cube_utils.try_load_beams(hdul)
if hasattr(beam, '__len__'):
beams = beam
else:
beams = None
if beams is not None:
self = VaryingResolutionOneDSpectrum(hdu.data, unit=unit,
wcs=mywcs, meta=meta,
header=hdu.header,
beams=beams)
else:
beam = cube_utils.try_load_beam(hdu.header)
self = OneDSpectrum(hdu.data, unit=unit, wcs=mywcs, meta=meta,
header=hdu.header, beam=beam)
return self
@property
def header(self):
header = super(BaseOneDSpectrum, self).header
# Preserve the spectrum's spectral units
if 'CUNIT1' in header and self._spectral_unit != u.Unit(header['CUNIT1']):
spectral_scale = spectral_axis.wcs_unit_scale(self._spectral_unit)
header['CDELT1'] *= spectral_scale
header['CRVAL1'] *= spectral_scale
header['CUNIT1'] = self.spectral_axis.unit.to_string(format='FITS')
return header
@property
def spectral_axis(self):
"""
A `~astropy.units.Quantity` array containing the central values of
each channel along the spectral axis.
"""
if self._wcs is None:
spec_axis = np.arange(self.size) * u.one
else:
spec_axis = self.wcs.wcs_pix2world(np.arange(self.size), 0)[0] * \
u.Unit(self.wcs.wcs.cunit[0])
if self._spectral_unit is not None:
spec_axis = spec_axis.to(self._spectral_unit)
return spec_axis
def quicklook(self, filename=None, drawstyle='steps-mid', **kwargs):
"""
Plot the spectrum with current spectral units in the currently open
figure
kwargs are passed to `matplotlib.pyplot.plot`
Parameters
----------
        filename : str or None
Optional - the filename to save the quicklook to.
"""
from matplotlib import pyplot
ax = pyplot.gca()
ax.plot(self.spectral_axis, self.filled_data[:].value,
drawstyle=drawstyle, **kwargs)
ax.set_xlabel(self.spectral_axis.unit.to_string(format='latex'))
ax.set_ylabel(self.unit)
if filename is not None:
pyplot.gcf().savefig(filename)
def with_spectral_unit(self, unit, velocity_convention=None,
rest_value=None):
newwcs, newmeta = self._new_spectral_wcs(unit,
velocity_convention=velocity_convention,
rest_value=rest_value)
newheader = self._nowcs_header.copy()
newheader.update(newwcs.to_header())
wcs_cunit = u.Unit(newheader['CUNIT1'])
newheader['CUNIT1'] = unit.to_string(format='FITS')
newheader['CDELT1'] *= wcs_cunit.to(unit)
if self._mask is not None:
newmask = self._mask.with_spectral_unit(unit,
velocity_convention=velocity_convention,
rest_value=rest_value)
newmask._wcs = newwcs
else:
newmask = None
return self._new_spectrum_with(wcs=newwcs, spectral_unit=unit,
mask=newmask, meta=newmeta,
header=newheader)
def __getitem__(self, key, **kwargs):
# Ideally, this could just be in VaryingResolutionOneDSpectrum,
        # but the code ends up about the same length either way, so it
        # is kept here.
try:
kwargs['beams'] = self.beams[key]
except (AttributeError, TypeError):
pass
new_qty = super(BaseOneDSpectrum, self).__getitem__(key)
if isinstance(key, slice):
new = self.__class__(value=new_qty.value,
unit=new_qty.unit,
copy=False,
wcs=wcs_utils.slice_wcs(self._wcs, key,
shape=self.shape),
meta=self._meta,
mask=(self._mask[key]
if self._mask is not nomask
else nomask),
header=self._header,
wcs_tolerance=self._wcs_tolerance,
fill_value=self.fill_value,
**kwargs)
return new
else:
if self._mask is not nomask:
# Kind of a hack; this is probably inefficient
bad = self._mask.exclude()[key]
if isinstance(bad, da.Array):
bad = bad.compute()
new_qty[bad] = np.nan
return new_qty
def __getattribute__(self, attrname):
# This is a hack to handle dimensionality-reducing functions
# We want spectrum.max() to return a Quantity, not a spectrum
# Long-term, we really want `OneDSpectrum` to not inherit from
# `Quantity`, but for now this approach works.... we just have
# to add more functions to this list.
if attrname in ('min', 'max', 'std', 'mean', 'sum', 'cumsum',
'nansum', 'ptp', 'var'):
return getattr(self.quantity, attrname)
else:
return super(BaseOneDSpectrum, self).__getattribute__(attrname)
def spectral_interpolate(self, spectral_grid,
suppress_smooth_warning=False,
fill_value=None):
"""
Resample the spectrum onto a specific grid
Parameters
----------
spectral_grid : array
An array of the spectral positions to regrid onto
suppress_smooth_warning : bool
If disabled, a warning will be raised when interpolating onto a
grid that does not nyquist sample the existing grid. Disable this
if you have already appropriately smoothed the data.
fill_value : float
Value for extrapolated spectral values that lie outside of
the spectral range defined in the original data. The
default is to use the nearest spectral channel in the
cube.
Returns
-------
spectrum : OneDSpectrum
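        Example (an illustrative sketch; ``spec`` and the velocity grid are
        assumed, and the grid units must be convertible to the spectral
        axis units):
            >>> new_grid = np.linspace(-10, 10, 50) * u.km / u.s
            >>> resampled = spec.spectral_interpolate(new_grid,
            ...     suppress_smooth_warning=True)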
"""
assert spectral_grid.ndim == 1
inaxis = self.spectral_axis.to(spectral_grid.unit)
indiff = np.mean(np.diff(inaxis))
outdiff = np.mean(np.diff(spectral_grid))
# account for reversed axes
if outdiff < 0:
spectral_grid = spectral_grid[::-1]
outdiff = np.mean(np.diff(spectral_grid))
outslice = slice(None, None, -1)
else:
outslice = slice(None, None, 1)
specslice = slice(None) if indiff >= 0 else slice(None, None, -1)
inaxis = inaxis[specslice]
indiff = np.mean(np.diff(inaxis))
# insanity checks
if indiff < 0 or outdiff < 0:
raise ValueError("impossible.")
assert np.all(np.diff(spectral_grid) > 0)
assert np.all(np.diff(inaxis) > 0)
np.testing.assert_allclose(np.diff(spectral_grid), outdiff,
err_msg="Output grid must be linear")
if outdiff > 2 * indiff and not suppress_smooth_warning:
warnings.warn("Input grid has too small a spacing. The data should "
"be smoothed prior to resampling.",
SmoothingWarning
)
newspec = np.empty([spectral_grid.size], dtype=self.dtype)
newmask = np.empty([spectral_grid.size], dtype='bool')
newspec[outslice] = np.interp(spectral_grid.value, inaxis.value,
self.filled_data[specslice].value,
left=fill_value, right=fill_value)
mask = self.mask.include()
if all(mask):
newmask = np.ones([spectral_grid.size], dtype='bool')
else:
interped = np.interp(spectral_grid.value,
inaxis.value, mask[specslice]) > 0
newmask[outslice] = interped
newwcs = self.wcs.deepcopy()
newwcs.wcs.crpix[0] = 1
newwcs.wcs.crval[0] = spectral_grid[0].value if outslice.step > 0 \
else spectral_grid[-1].value
newwcs.wcs.cunit[0] = spectral_grid.unit.to_string(format='FITS')
newwcs.wcs.cdelt[0] = outdiff.value if outslice.step > 0 \
else -outdiff.value
newwcs.wcs.set()
newheader = self._nowcs_header.copy()
newheader.update(newwcs.to_header())
wcs_cunit = u.Unit(newheader['CUNIT1'])
newheader['CUNIT1'] = spectral_grid.unit.to_string(format='FITS')
newheader['CDELT1'] *= wcs_cunit.to(spectral_grid.unit)
newbmask = BooleanArrayMask(newmask, wcs=newwcs)
return self._new_spectrum_with(data=newspec, wcs=newwcs, mask=newbmask,
header=newheader,
spectral_unit=spectral_grid.unit)
def spectral_smooth(self, kernel,
convolve=convolution.convolve,
**kwargs):
"""
Smooth the spectrum
Parameters
----------
kernel : `~astropy.convolution.Kernel1D`
A 1D kernel from astropy
convolve : function
The astropy convolution function to use, either
`astropy.convolution.convolve` or
`astropy.convolution.convolve_fft`
kwargs : dict
Passed to the convolve function
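        Example (an illustrative sketch; ``spec`` and the kernel width are
        assumed values):
            >>> from astropy.convolution import Gaussian1DKernel
            >>> smoothed = spec.spectral_smooth(Gaussian1DKernel(2))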
"""
newspec = convolve(self.value, kernel, normalize_kernel=True, **kwargs)
return self._new_spectrum_with(data=newspec)
def to(self, unit, equivalencies=[]):
"""
Return a new `~spectral_cube.lower_dimensional_structures.OneDSpectrum`
of the same class with the specified unit.
See `astropy.units.Quantity.to` for further details.
"""
return super(BaseOneDSpectrum, self).to(unit, equivalencies, freq=None)
def with_fill_value(self, fill_value):
"""
Create a new :class:`OneDSpectrum` with a different ``fill_value``.
"""
return self._new_spectrum_with(fill_value=fill_value)
@property
def _new_thing_with(self):
return self._new_spectrum_with
def _new_spectrum_with(self, data=None, wcs=None, mask=None, meta=None,
fill_value=None, spectral_unit=None, unit=None,
header=None, wcs_tolerance=None,
**kwargs):
data = self._data if data is None else data
if unit is None and hasattr(data, 'unit'):
if data.unit != self.unit:
raise u.UnitsError("New data unit '{0}' does not"
" match unit '{1}'. You can"
" override this by specifying the"
" `unit` keyword."
.format(data.unit, self.unit))
unit = data.unit
elif unit is None:
unit = self.unit
elif unit is not None:
# convert string units to Units
if not isinstance(unit, u.Unit):
unit = u.Unit(unit)
if hasattr(data, 'unit'):
if u.Unit(unit) != data.unit:
raise u.UnitsError("The specified new cube unit '{0}' "
"does not match the input unit '{1}'."
.format(unit, data.unit))
else:
data = u.Quantity(data, unit=unit, copy=False)
wcs = self._wcs if wcs is None else wcs
mask = self._mask if mask is None else mask
if meta is None:
meta = {}
meta.update(self._meta)
if unit is not None:
meta['BUNIT'] = unit.to_string(format='FITS')
fill_value = self._fill_value if fill_value is None else fill_value
spectral_unit = self._spectral_unit if spectral_unit is None else u.Unit(spectral_unit)
spectrum = self.__class__(value=data, wcs=wcs, mask=mask, meta=meta,
unit=unit, fill_value=fill_value,
header=header or self._header,
wcs_tolerance=wcs_tolerance or self._wcs_tolerance,
**kwargs)
spectrum._spectral_unit = spectral_unit
return spectrum
class OneDSpectrum(BaseOneDSpectrum, BeamMixinClass):
def __new__(cls, value, beam=None, read_beam=False, **kwargs):
self = super(OneDSpectrum, cls).__new__(cls, value, **kwargs)
if beam is None:
if "beam" in self.meta:
beam = self.meta['beam']
elif read_beam:
beam = cube_utils.try_load_beam(self.header)
if beam is None:
warnings.warn("Cannot load beam from header.",
BeamWarning
)
if beam is not None:
self.beam = beam
self.meta['beam'] = beam
self._cache = {}
return self
def _new_spectrum_with(self, **kwargs):
beam = kwargs.pop('beam', None)
if 'beam' in self._meta and beam is None:
beam = self.beam
out = super(OneDSpectrum, self)._new_spectrum_with(beam=beam, **kwargs)
return out
def with_beam(self, beam):
'''
Attach a new beam object to the OneDSpectrum.
Parameters
----------
beam : `~radio_beam.Beam`
A new beam object.
'''
meta = self.meta.copy()
meta['beam'] = beam
return self._new_spectrum_with(beam=beam, meta=meta)
class VaryingResolutionOneDSpectrum(BaseOneDSpectrum, MultiBeamMixinClass):
def __new__(cls, value, beams=None, read_beam=False, goodbeams_mask=None, **kwargs):
self = super(VaryingResolutionOneDSpectrum, cls).__new__(cls, value, **kwargs)
assert hasattr(self, '_fill_value')
if beams is None:
if "beams" in self.meta:
beams = self.meta['beams']
elif read_beam:
beams = cube_utils.try_load_beams(self.header)
if beams is None:
warnings.warn("Cannot load beams table from header.",
BeamWarning
)
if beams is not None:
if isinstance(beams, BinTableHDU):
beam_data_table = beams.data
elif isinstance(beams, FITS_rec):
beam_data_table = beams
else:
beam_data_table = None
if beam_data_table is not None:
beams = Beams(major=u.Quantity(beam_data_table['BMAJ'], u.arcsec),
minor=u.Quantity(beam_data_table['BMIN'], u.arcsec),
pa=u.Quantity(beam_data_table['BPA'], u.deg),
meta=[{key: row[key] for key in beam_data_table.names
if key not in ('BMAJ','BPA', 'BMIN')}
for row in beam_data_table],)
self.beams = beams
self.meta['beams'] = beams
if goodbeams_mask is not None:
self.goodbeams_mask = goodbeams_mask
self._cache = {}
return self
@property
def hdu(self):
warnings.warn("There are multiple beams for this spectrum that "
"are being ignored when creating the HDU.",
BeamWarning
)
return super(VaryingResolutionOneDSpectrum, self).hdu
@property
def hdulist(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
hdu = self.hdu
beamhdu = cube_utils.beams_to_bintable(self.beams)
return HDUList([hdu, beamhdu])
def _new_spectrum_with(self, **kwargs):
beams = kwargs.pop('beams', self.beams)
if beams is None:
beams = self.beams
VRODS = VaryingResolutionOneDSpectrum
out = super(VRODS, self)._new_spectrum_with(beams=beams,
**kwargs)
return out
def __array_finalize__(self, obj):
super(VaryingResolutionOneDSpectrum, self).__array_finalize__(obj)
self._beams = getattr(obj, '_beams', None)
if getattr(obj, 'goodbeams_mask', None) is not None:
# do NOT use the setter here, because we sometimes need to write
# intermediate size-mismatch things that later get fixed, e.g., in
# __getitem__ below
self._goodbeams_mask = getattr(obj, 'goodbeams_mask', None)
def __getitem__(self, key):
new_qty = super(VaryingResolutionOneDSpectrum, self).__getitem__(key)
# use the goodbeams_mask setter here because it checks size
new_qty.goodbeams_mask = self.goodbeams_mask[key]
new_qty.beams = self.unmasked_beams[key]
return new_qty
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/zh/text/document_clustering.py | 21 | 8531 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
  - TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
    frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: because they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
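A minimal sketch of the core pipeline used below (illustrative only; the
``docs`` list and the parameter values are assumed here, whereas the script
itself reads them from the command line):
    >>> from sklearn.feature_extraction.text import TfidfVectorizer
    >>> from sklearn.cluster import KMeans
    >>> X = TfidfVectorizer(max_features=10000,
    ...                     stop_words='english').fit_transform(docs)
    >>> labels = KMeans(n_clusters=4, n_init=1).fit_predict(X)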
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
# #############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', alternate_sign=False,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
alternate_sign=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
# #############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| gpl-3.0 |
mfherbst/spack | var/spack/repos/builtin/packages/py-meep/package.py | 5 | 3434 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyMeep(PythonPackage):
"""Python-meep is a wrapper around libmeep. It allows the scripting of
    Meep simulations with Python."""
homepage = "https://launchpad.net/python-meep"
url = "https://launchpad.net/python-meep/1.4/1.4/+download/python-meep-1.4.2.tar"
version('1.4.2', 'f8913542d18b0dda92ebc64f0a10ce56')
variant('mpi', default=True, description='Enable MPI support')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('mpi', when='+mpi')
depends_on('meep~mpi', when='~mpi')
depends_on('meep+mpi', when='+mpi')
# As of SWIG 3.0.3, Python-style comments are now treated as
# pre-processor directives. Use older SWIG. But not too old,
# or else it can't handle newer C++ compilers and flags.
depends_on('[email protected]:3.0.2')
phases = ['clean', 'build_ext', 'install', 'bdist']
def setup_file(self):
return 'setup-mpi.py' if '+mpi' in self.spec else 'setup.py'
def common_args(self, spec, prefix):
include_dirs = [
spec['meep'].prefix.include,
spec['py-numpy'].include
]
library_dirs = [
spec['meep'].prefix.lib
]
if '+mpi' in spec:
include_dirs.append(spec['mpi'].prefix.include)
library_dirs.append(spec['mpi'].prefix.lib)
include_flags = '-I{0}'.format(','.join(include_dirs))
library_flags = '-L{0}'.format(','.join(library_dirs))
# FIXME: For some reason, this stopped working.
# The -I and -L are no longer being properly forwarded to setup.py:
# meep_common.i:87: Error: Unable to find 'meep/mympi.hpp'
# meep_common.i:88: Error: Unable to find 'meep/vec.hpp'
# meep_common.i:89: Error: Unable to find 'meep.hpp'
return [include_flags, library_flags]
def clean_args(self, spec, prefix):
return ['--all']
def build_ext_args(self, spec, prefix):
return self.common_args(spec, prefix)
def bdist_args(self, spec, prefix):
return self.common_args(spec, prefix)
| lgpl-2.1 |
jchodera/LiquidBenchmark | src/figures/plot_tbv.py | 2 | 5702 | import numpy as np
import sklearn.metrics, sklearn.cross_validation
import statsmodels.formula.api as sm
import simtk.unit as u
import polarizability
import matplotlib.pyplot as plt
import pandas as pd
FIGURE_SIZE = (6.5, 6.5)
DPI = 1600
expt = pd.read_csv("./tables/data_with_metadata.csv")
expt["temperature"] = expt["Temperature, K"]
pred = pd.read_csv("./tables/predictions.csv")
pred["polcorr"] = pd.Series(dict((cas, polarizability.dielectric_correction_from_formula(formula, density * u.grams / u.milliliter)) for cas, (formula, density) in pred[["formula", "density"]].iterrows()))
pred["corrected_dielectric"] = pred["polcorr"] + pred["dielectric"]
expt = expt.set_index(["cas", "temperature"]) # Can't do this because of duplicates # Should be fixed now, probably due to the CAS / name duplication issue found by Julie.
#expt = expt.groupby(["cas", "temperature"]).mean() # Fix a couple of duplicates, not sure how they got there.
pred = pred.set_index(["cas", "temperature"])
pred["expt_density"] = expt["Mass density, kg/m3"]
pred["expt_dielectric"] = expt["Relative permittivity at zero frequency"]
#pred["expt_density_std"] = expt["Mass density, kg/m3_std"]
pred["expt_density_std"] = expt["Mass density, kg/m3_uncertainty_bestguess"]
#pred["expt_dielectric_std"] = expt["Relative permittivity at zero frequency_std"]
pred["expt_dielectric_std"] = expt["Relative permittivity at zero frequency_uncertainty_bestguess"]
plt.figure(figsize=FIGURE_SIZE, dpi=DPI)
for (formula, grp) in pred.groupby("formula"):
x, y = grp["density"], grp["expt_density"]
xerr = grp["density_sigma"]
yerr = grp["expt_density_std"].replace(np.nan, 0.0)
x = x / 1000. # Convert kg / m3 to g / mL
y = y / 1000. # Convert kg / m3 to g / mL
xerr = xerr / 1000. # Convert kg / m3 to g / mL
yerr = yerr / 1000. # Convert kg / m3 to g / mL
plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='.', label=formula)
plt.plot([.600, 1.400], [.600, 1.400], 'k', linewidth=1)
plt.xlim((.600, 1.400))
plt.ylim((.600, 1.400))
plt.xlabel("Predicted (GAFF)")
plt.ylabel("Experiment (ThermoML)")
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
x, y = pred["density"], pred["expt_density"]
relative_rms = (((x - y) / x)**2).mean()** 0.5
cv = sklearn.cross_validation.Bootstrap(len(x), train_size=len(x) - 1, n_iter=100)
relative_rms_grid = np.array([(((x[ind] - y[ind]) / x[ind])**2).mean()** 0.5 for ind, _ in cv])
relative_rms_err = relative_rms_grid.std()
plt.title(r"Density [g cm$^{-3}$]")
plt.savefig("./manuscript/figures/densities_thermoml.pdf", bbox_inches="tight")
plt.savefig("./manuscript/figures/densities_thermoml.tif", bbox_inches="tight")
plt.figure(figsize=FIGURE_SIZE, dpi=DPI)
for (formula, grp) in pred.groupby("formula"):
x, y = grp["density"], grp["expt_density"]
xerr = grp["density_sigma"]
yerr = grp["expt_density_std"].replace(np.nan, 0.0)
x = x / 1000. # Convert kg / m3 to g / mL
y = y / 1000. # Convert kg / m3 to g / mL
xerr = xerr / 1000. # Convert kg / m3 to g / mL
yerr = yerr / 1000. # Convert kg / m3 to g / mL
plt.errorbar(x - y, y, xerr=xerr, yerr=yerr, fmt='.', label=formula)
plt.xlim((-0.1, 0.1))
plt.ylim((.600, 1.400))
plt.xlabel("Predicted - Experiment")
plt.ylabel("Experiment (ThermoML)")
plt.gca().set_aspect('auto', adjustable='box')
plt.draw()
x, y = pred["density"], pred["expt_density"]
relative_rms = (((x - y) / x)**2).mean()** 0.5
cv = sklearn.cross_validation.Bootstrap(len(x), train_size=len(x) - 1, n_iter=100)
relative_rms_grid = np.array([(((x[ind] - y[ind]) / x[ind])**2).mean()** 0.5 for ind, _ in cv])
relative_rms_err = relative_rms_grid.std()
plt.title(r"Density [g cm$^{-3}$]")
plt.savefig("./manuscript/figures/densities_differences_thermoml.pdf", bbox_inches="tight")
plt.savefig("./manuscript/figures/densities_differences_thermoml.tif", bbox_inches="tight")
yerr = pred["expt_dielectric_std"].replace(np.nan, 0.0)
xerr = pred["dielectric_sigma"].replace(np.nan, 0.0)
plt.figure(figsize=FIGURE_SIZE, dpi=DPI)
plt.xlabel("Predicted (GAFF)")
plt.ylabel("Experiment (ThermoML)")
plt.title("Inverse Static Dielectric Constant")
#ticks = np.concatenate([np.arange(1, 10), 10 * np.arange(1, 10)])
#xticks(ticks)
#yticks(ticks)
plt.plot([0.0, 1], [0.0, 1], 'k') # Guide
#xscale('log')
#yscale('log')
x, y = pred["dielectric"], pred["expt_dielectric"]
ols_model = sm.OLS(y, x)
ols_results = ols_model.fit()
r2 = ols_results.rsquared
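# Note: sm.OLS without an added constant fits a no-intercept model, so the
# R^2 reported here is the uncentered (through-the-origin) value.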
#plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='.', label="GAFF (R^2 = %.3f)" % r2)
#plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='.', label="GAFF")
plt.errorbar(x ** -1, y ** -1, xerr=xerr * x ** -2, yerr=yerr * y ** -2, fmt='.', label="GAFF") # Transform xerr and yerr for 1 / epsilon plot
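# First-order error propagation for the inverse: if z = 1 / x then sigma_z ~= sigma_x / x**2,
# which is why xerr and yerr are scaled by x**-2 and y**-2 above.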
plt.xlim((0.0, 1))
plt.ylim((0.0, 1))
plt.legend(loc=0)
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
plt.savefig("./manuscript/figures/dielectrics_thermoml_nocorr.pdf", bbox_inches="tight")
x, y = pred["corrected_dielectric"], pred["expt_dielectric"]
ols_model = sm.OLS(y, x)
ols_results = ols_model.fit()
r2 = ols_results.rsquared
#plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='.', label="Corrected (R^2 = %.3f)" % r2)
#plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='.', label="Corrected")
plt.errorbar(x ** -1, y ** -1, xerr=xerr * x ** -2, yerr=yerr * y ** -2, fmt='.', label="Corrected") # Transform xerr and yerr for 1 / epsilon plot
plt.xlim((0.0, 1.02))
plt.ylim((0.0, 1.02))
plt.legend(loc=0)
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
plt.savefig("./manuscript/figures/dielectrics_thermoml.pdf", bbox_inches="tight")
plt.savefig("./manuscript/figures/dielectrics_thermoml.tif", bbox_inches="tight")
| gpl-2.0 |
DavidMcDonald1993/ghsom | yeast_script.py | 1 | 6872 |
# coding: utf-8
# In[ ]:
import os
import pickle
import shutil
import Queue
import networkx as nx
import numpy as np
import pandas as pd
# from ghsom import main_no_labels as ghsom_main
# from ghsom_parallel import main as ghsom_main
# from ghsom_parallel_signal_frequency import main as ghsom_main
from ghsom_parallel_edge_constrained import main as ghsom_main
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
root_dir = "/home/david/Documents/ghsom/hierarchical_exploration_constrained/"
# for data in ["yeast_reactome", "yeast_uetz", "collins", "ccsb", "ito_core", "lc_multiple"]:
for data in ["yeast_reactome"]:
print "data={}".format(data)
print
# for e_sg in [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]:
# for e_sg in [0.8, 0.7, 0.6, 0.5, 0.4]:
for e_sg in [0.8, 0.6, 0.5]:
print "e_sg={}".format(e_sg)
print
# for e_en in [0.7, 0.6, 0.5, 0.4, 0.3]:
# for e_en in [e_sg]:
for e_en in [0.3]:
print "e_en={}".format(e_en)
print
os.chdir(root_dir)
#ghsom parameters
params = {'eta': 0.001,
'sigma': 1,
'e_sg': e_sg,
'e_en': e_en}
map_file = '{}_hierarchy_communities_{}_{}'.format(data, e_sg, e_en)
if not os.path.isfile("{}.pkl".format(map_file)):
#run ghsom and save output
print "running GHSOM and saving to {}.pkl".format(map_file)
# G, map = ghsom_main(params, '../embedded_{}.gpickle'.format(data), lam=1000)
# save_obj((G, map), map_file)
# print '\nnumber of communities found: {}, saved maps to {}'.format(len(map), map_file)
G, networks = ghsom_main(params, '../embedded_{}.gpickle'.format(data), num_iter=1000, num_threads=5)
save_obj((G, networks), map_file)
print '\nnumber of maps grown: {}, saved maps to {}'.format(len(networks), map_file)
else:
print "{}.pkl already exists, loading maps".format(map_file)
#load output
# G, map = load_obj(map_file)
G, networks = load_obj(map_file)
#save results to file
dir_name = "{}_hierarchy_communities_{}_{}".format(data, e_sg, e_en)
if not os.path.isdir(dir_name):
# shutil.rmtree(dir_name)
os.mkdir(dir_name)
print 'made directory {}'.format(dir_name)
os.chdir(dir_name)
print "moved to {}".format(dir_name)
#all genes
all_genes_file = "all_genes.txt"
with open(all_genes_file, 'w') as f:
for n, d in G.nodes(data=True):
f.write("{}\n".format(n))
print "wrote {}".format(all_genes_file)
genes = G.nodes()
gene_assignments = {k: v for k, v in zip(genes,
np.array([["" for j in range(100)] for i in range(len(genes))], dtype="S100"))}
for map_id, network, e in networks[1:]:
#shortest path matrix
shortest_path = nx.floyd_warshall_numpy(network)
communities_in_this_map = np.array([v for k, v in nx.get_node_attributes(network, "ID").items()])
shortest_path_df = pd.DataFrame(shortest_path, index=communities_in_this_map)
shortest_path_file = "{}_shortest_path.csv".format(map_id)
shortest_path_df.to_csv(shortest_path_file, index=True, header=False, sep=',')
print 'wrote shortest path matrix and saved as {}'.format(shortest_path_file)
for n, d in network.nodes(data=True):
community_assignment = d["ID"]
layer = community_assignment.count("-")
for node in d["ls"]:
gene_assignments[node][layer] = community_assignment
# #map queue
# q = Queue.Queue()
# c = 1
# depth = 0
# q.put((c, depth, map))
# genes = G.nodes()
# gene_assignments = {k: v for k, v in zip(genes,
# np.array([["" for j in range(10)] for i in range(len(genes))], dtype="S20"))}
# while not q.empty():
# map_id, depth, map = q.get()
# c = 1
# #shortest path matrix
# communities_in_this_map = np.array(["{}-{}".format(str(map_id).zfill(2),
# str(i).zfill(2)) for i in range(c, c + len(map))])
# shortest_path = nx.floyd_warshall_numpy(map).astype(np.int)
# # shortest_path = np.insert(shortest_path, 0, communities_in_this_map, axis=1)
# shortest_path_df = pd.DataFrame(shortest_path, index=communities_in_this_map)
# shortest_path_file = "{}_shortest_path.csv".format(str(map_id).zfill(2))
# # np.savetxt(shortest_path_file, shortest_path, fmt='%i', delimiter=",")
# shortest_path_df.to_csv(shortest_path_file, index=True, header=False, sep=',')
# print 'wrote shortest path matrix and saved as {}'.format(shortest_path_file)
# #gene community assignments
# for n, d in map.nodes(data=True):
# community = communities_in_this_map[c - 1]
# for node in d['ls']:
# gene_assignments[node][depth] = community
# #add map to queue
# m = d['n']
# if not m == []:
# q.put((community, depth + 1, m))
# c += 1
#back to matrix
assignment_matrix = np.array([v for k, v in gene_assignments.items()])
#remove unnecessary columns
mask = assignment_matrix != ""
idx = mask.any(axis = 0)
assignment_matrix = assignment_matrix[:,idx]
assignment_matrix = np.insert(assignment_matrix, 0, "01", axis=1)
assignment_df = pd.DataFrame(assignment_matrix, index=genes)
assignment_file = "assignment_matrix.csv"
assignment_df.to_csv(assignment_file, index=True, header=False, sep=',')
print "wrote assignment matrix and saved it as {}".format(assignment_file)
print
print "DONE"
# In[ ]:
| gpl-2.0 |
bloyl/mne-python | examples/decoding/linear_model_patterns.py | 6 | 4352 | # -*- coding: utf-8 -*-
"""
.. _ex-linear-patterns:
===============================================================
Linear classifier on sensor data with plot patterns and filters
===============================================================
Here decoding, a.k.a. MVPA or supervised machine learning, is applied to M/EEG
data in sensor space. A linear classifier is fit with the LinearModel object,
providing topographical patterns which are more neurophysiologically
interpretable :footcite:`HaufeEtAl2014` than the classifier filters (weight
vectors). The patterns explain how the MEG and EEG data were generated from
the discriminant neural sources which are extracted by the filters.
Note that patterns/filters in MEG data are more similar than in EEG data
because the noise is less spatially correlated in MEG than in EEG.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Romain Trachel <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
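# Background (Haufe et al., 2014): for a linear model with filter matrix W applied
# to data X, the corresponding activation patterns can be recovered as
# A = cov(X) @ W @ inv(cov(W.T @ X)); mne.decoding.LinearModel exposes these as
# ``patterns_`` alongside the ``filters_`` that are plotted below.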
import mne
from mne import io, EvokedArray
from mne.datasets import sample
from mne.decoding import Vectorizer, get_coef
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
# import a linear classifier from mne.decoding
from mne.decoding import LinearModel
print(__doc__)
data_path = sample.data_path()
sample_path = data_path + '/MEG/sample'
###############################################################################
# Set parameters
raw_fname = sample_path + '/sample_audvis_filt-0-40_raw.fif'
event_fname = sample_path + '/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.4
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(.5, 25, fir_design='firwin')
events = mne.read_events(event_fname)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
decim=2, baseline=None, preload=True)
del raw
labels = epochs.events[:, -1]
# get MEG and EEG data
meg_epochs = epochs.copy().pick_types(meg=True, eeg=False)
meg_data = meg_epochs.get_data().reshape(len(labels), -1)
###############################################################################
# Decoding in sensor space using a LogisticRegression classifier
# --------------------------------------------------------------
clf = LogisticRegression(solver='lbfgs')
scaler = StandardScaler()
# create a linear model with LogisticRegression
model = LinearModel(clf)
# fit the classifier on MEG data
X = scaler.fit_transform(meg_data)
model.fit(X, labels)
# Extract and plot spatial filters and spatial patterns
for name, coef in (('patterns', model.patterns_), ('filters', model.filters_)):
# We fitted the linear model onto Z-scored data. To make the filters
# interpretable, we must reverse this normalization step
coef = scaler.inverse_transform([coef])[0]
# The data was vectorized to fit a single model across all time points and
# all channels. We thus reshape it:
coef = coef.reshape(len(meg_epochs.ch_names), -1)
# Plot
evoked = EvokedArray(coef, meg_epochs.info, tmin=epochs.tmin)
evoked.plot_topomap(title='MEG %s' % name, time_unit='s')
###############################################################################
# Let's do the same on EEG data using a scikit-learn pipeline
X = epochs.pick_types(meg=False, eeg=True)
y = epochs.events[:, 2]
# Define a unique pipeline to sequentially:
clf = make_pipeline(
Vectorizer(), # 1) vectorize across time and channels
StandardScaler(), # 2) normalize features across trials
LinearModel(
LogisticRegression(solver='lbfgs'))) # 3) fits a logistic regression
clf.fit(X, y)
# Extract and plot patterns and filters
for name in ('patterns_', 'filters_'):
# The `inverse_transform` parameter will call this method on any estimator
# contained in the pipeline, in reverse order.
coef = get_coef(clf, name, inverse_transform=True)
evoked = EvokedArray(coef, epochs.info, tmin=epochs.tmin)
evoked.plot_topomap(title='EEG %s' % name[:-1], time_unit='s')
###############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
hainm/MSMs | attic/src/code/hmsm/trim_hmsm.py | 3 | 1243 | import shutil
import numpy as np
import pandas as pd
import mdtraj as md
from mixtape.utils import iterobjects
import mixtape.ghmm
import mixtape.featurizer
import os
name = "atomindices"
json_filename = "./%s.jsonlines" % name
feature_filename = "./%s.pkl" % name
models = list(iterobjects(json_filename))
df = pd.DataFrame(models)
x = df.ix[0]
T = np.array(x["transmat"])
p = np.array(x["populations"])
featurizer = mixtape.featurizer.load(feature_filename)
model = mixtape.ghmm.GaussianFusionHMM(3, featurizer.n_features)
model.means_ = x["means"]
model.vars_ = x["vars"]
model.transmat_ = x["transmat"]
model.populations_ = x["populations"]
trj0 = md.load("./system.subset.pdb")
atom_indices = np.loadtxt("./AtomIndices.dat", "int")
n_traj = 348
#n_traj = 131
scores = np.zeros(n_traj)
for i in range(n_traj):
print(i)
traj = md.load("./Trajectories/trj%d.h5" % i)
features = featurizer.featurize(traj)
scores[i] = model.score([features]) / float(len(features))
cutoff = 500.0 # atomindices
#cutoff = 0.0 # atompairs
k = 0
for i in range(n_traj):
if scores[i] > cutoff:
print(i, k)
shutil.copy("./Trajectories/trj%d.h5" % i, "./subset_%s/Trajectories/trj%d.h5" % (name, k))
k += 1
| gpl-2.0 |
leesavide/pythonista-docs | Documentation/matplotlib/examples/misc/rc_traits.py | 6 | 5531 | # Here is some example code showing how to define some representative
# rc properties and construct a matplotlib artist using traits.
# matplotlib does not ship with enthought.traits, so you will need to
# install it separately.
from __future__ import print_function
import sys, os, re
import traits.api as traits
from matplotlib.cbook import is_string_like
from matplotlib.artist import Artist
doprint = True
flexible_true_trait = traits.Trait(
True,
{ 'true': True, 't': True, 'yes': True, 'y': True, 'on': True, True: True,
'false': False, 'f': False, 'no': False, 'n': False, 'off': False, False: False
} )
flexible_false_trait = traits.Trait( False, flexible_true_trait )
colors = {
'c' : '#00bfbf',
'b' : '#0000ff',
'g' : '#008000',
'k' : '#000000',
'm' : '#bf00bf',
'r' : '#ff0000',
'w' : '#ffffff',
'y' : '#bfbf00',
'gold' : '#FFD700',
'peachpuff' : '#FFDAB9',
'navajowhite' : '#FFDEAD',
}
def hex2color(s):
"Convert hex string (like html uses, eg, #efefef) to a r,g,b tuple"
return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])
class RGBA(traits.HasTraits):
# r,g,b,a in the range 0-1 with default color 0,0,0,1 (black)
r = traits.Range(0., 1., 0.)
g = traits.Range(0., 1., 0.)
b = traits.Range(0., 1., 0.)
a = traits.Range(0., 1., 1.)
def __init__(self, r=0., g=0., b=0., a=1.):
self.r = r
self.g = g
self.b = b
self.a = a
def __repr__(self):
return 'r,g,b,a = (%1.2f, %1.2f, %1.2f, %1.2f)'%\
(self.r, self.g, self.b, self.a)
def tuple_to_rgba(ob, name, val):
tup = [float(x) for x in val]
if len(tup)==3:
r,g,b = tup
return RGBA(r,g,b)
elif len(tup)==4:
r,g,b,a = tup
return RGBA(r,g,b,a)
else:
raise ValueError
tuple_to_rgba.info = 'a RGB or RGBA tuple of floats'
def hex_to_rgba(ob, name, val):
rgx = re.compile('^#[0-9A-Fa-f]{6}$')
if not is_string_like(val):
raise TypeError
if rgx.match(val) is None:
raise ValueError
r,g,b = hex2color(val)
return RGBA(r,g,b,1.0)
hex_to_rgba.info = 'a hex color string'
def colorname_to_rgba(ob, name, val):
hex = colors[val.lower()]
r,g,b = hex2color(hex)
return RGBA(r,g,b,1.0)
colorname_to_rgba.info = 'a named color'
def float_to_rgba(ob, name, val):
val = float(val)
return RGBA(val, val, val, 1.)
float_to_rgba.info = 'a grayscale intensity'
Color = traits.Trait(RGBA(), float_to_rgba, colorname_to_rgba, RGBA,
hex_to_rgba, tuple_to_rgba)
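# With the handlers above, a Color trait accepts a grayscale float, a named color,
# an RGBA instance, a hex string such as '#efefef', or an RGB/RGBA tuple of floats,
# e.g. rc.lines.color = 'gold' or p.facecolor = (1, .5, .5, .25) as used below.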
def file_exists(ob, name, val):
    fh = open(val, 'r')
return val
def path_exists(ob, name, val):
os.path.exists(val)
linestyles = ('-', '--', '-.', ':', 'steps', 'None')
TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN = range(4)
linemarkers = (None, '.', ',', 'o', '^', 'v', '<', '>', 's',
'+', 'x', 'd', 'D', '|', '_', 'h', 'H',
'p', '1', '2', '3', '4',
TICKLEFT,
TICKRIGHT,
TICKUP,
TICKDOWN,
'None'
)
class LineRC(traits.HasTraits):
linewidth = traits.Float(0.5)
linestyle = traits.Trait(*linestyles)
color = Color
marker = traits.Trait(*linemarkers)
markerfacecolor = Color
markeredgecolor = Color
markeredgewidth = traits.Float(0.5)
markersize = traits.Float(6)
antialiased = flexible_true_trait
data_clipping = flexible_false_trait
class PatchRC(traits.HasTraits):
linewidth = traits.Float(1.0)
facecolor = Color
edgecolor = Color
antialiased = flexible_true_trait
timezones = 'UTC', 'US/Central', 'US/Eastern' # fixme: and many more
backends = ('GTKAgg', 'Cairo', 'GDK', 'GTK', 'Agg',
'GTKCairo', 'PS', 'SVG', 'Template', 'TkAgg',
'WX')
class RC(traits.HasTraits):
backend = traits.Trait(*backends)
interactive = flexible_false_trait
toolbar = traits.Trait('toolbar2', 'classic', None)
timezone = traits.Trait(*timezones)
lines = traits.Trait(LineRC())
patch = traits.Trait(PatchRC())
rc = RC()
rc.lines.color = 'r'
if doprint:
print('RC')
rc.print_traits()
print('RC lines')
rc.lines.print_traits()
print('RC patches')
rc.patch.print_traits()
class Patch(Artist, traits.HasTraits):
linewidth = traits.Float(0.5)
facecolor = Color
fc = facecolor
edgecolor = Color
fill = flexible_true_trait
def __init__(self,
edgecolor=None,
facecolor=None,
linewidth=None,
antialiased = None,
fill=1,
**kwargs
):
Artist.__init__(self)
if edgecolor is None: edgecolor = rc.patch.edgecolor
if facecolor is None: facecolor = rc.patch.facecolor
if linewidth is None: linewidth = rc.patch.linewidth
if antialiased is None: antialiased = rc.patch.antialiased
self.edgecolor = edgecolor
self.facecolor = facecolor
self.linewidth = linewidth
self.antialiased = antialiased
self.fill = fill
p = Patch()
p.facecolor = '#bfbf00'
p.edgecolor = 'gold'
p.facecolor = (1,.5,.5,.25)
p.facecolor = 0.25
p.fill = 'f'
print('p.facecolor', type(p.facecolor), p.facecolor)
print('p.fill', type(p.fill), p.fill)
if p.fill_: print('fill')
else: print('no fill')
if doprint:
print()
print('Patch')
p.print_traits()
| apache-2.0 |
djajetic/AutoML3Final | lib/libscores.py | 4 | 30033 | # Score library for NUMPY arrays
# ChaLearn AutoML challenge
# For regression:
# solution and prediction are vectors of numerical values of the same dimension
# For classification:
# solution = array(p,n) of 0,1 truth values, samples in lines, classes in columns
# prediction = array(p,n) of numerical scores between 0 and 1 (analogous to probabilities)
# Isabelle Guyon and Arthur Pesah, ChaLearn, August-November 2014
# ALL INFORMATION, SOFTWARE, DOCUMENTATION, AND DATA ARE PROVIDED "AS-IS".
# ISABELLE GUYON, CHALEARN, AND/OR OTHER ORGANIZERS OR CODE AUTHORS DISCLAIM
# ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE, AND THE
# WARRANTY OF NON-INFRINGEMENT OF ANY THIRD PARTY'S INTELLECTUAL PROPERTY RIGHTS.
# IN NO EVENT SHALL ISABELLE GUYON AND/OR OTHER ORGANIZERS BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF SOFTWARE, DOCUMENTS, MATERIALS,
# PUBLICATIONS, OR INFORMATION MADE AVAILABLE FOR THE CHALLENGE.
from sklearn import metrics
import numpy as np
import scipy as sp
import os
from sklearn.preprocessing import *
from sys import stderr
from sys import version
swrite = stderr.write
from os import getcwd as pwd
from pip import get_installed_distributions as lib
from glob import glob
import platform
import psutil
if (os.name == "nt"):
filesep = '\\'
else:
filesep = '/'
# ========= Useful functions ==============
def read_array(filename):
''' Read array and convert to 2d np arrays '''
array = np.genfromtxt(filename, dtype=float)
if len(array.shape)==1:
array = array.reshape( -1, 1 )
return array
def sanitize_array(array):
''' Replace NaN and Inf (there should not be any!)'''
a=np.ravel(array)
maxi = np.nanmax((filter(lambda x: x != float('inf'), a))) # Max except NaN and Inf
mini = np.nanmin((filter(lambda x: x != float('-inf'), a))) # Mini except NaN and Inf
array[array==float('inf')]=maxi
array[array==float('-inf')]=mini
mid = (maxi + mini)/2
array[np.isnan(array)]=mid
return array
def normalize_array (solution, prediction):
''' Use min and max of solution as scaling factors to normalize prediction,
then threshold it to [0, 1]. Binarize solution to {0, 1}.
This allows applying classification scores to all cases.
In principle, this should not do anything to properly formatted
classification inputs and outputs.'''
# Binarize solution
sol=np.ravel(solution) # convert to 1-d array
maxi = np.nanmax((filter(lambda x: x != float('inf'), sol))) # Max except NaN and Inf
mini = np.nanmin((filter(lambda x: x != float('-inf'), sol))) # Mini except NaN and Inf
if maxi == mini:
print('Warning, cannot normalize')
return [solution, prediction]
diff = maxi - mini
mid = (maxi + mini)/2.
new_solution = np.copy(solution)
new_solution[solution>=mid] = 1
new_solution[solution<mid] = 0
# Normalize and threshold predictions (takes effect only if solution not in {0, 1})
new_prediction = (np.copy(prediction) - float(mini))/float(diff)
new_prediction[new_prediction>1] = 1 # and if predictions exceed the bounds [0, 1]
new_prediction[new_prediction<0] = 0
# Make probabilities smoother
#new_prediction = np.power(new_prediction, (1./10))
return [new_solution, new_prediction]
def binarize_predictions(array, task='binary.classification'):
''' Turn predictions into decisions {0,1} by selecting the class with largest
score for multiclass problems and thresholding at 0.5 for other cases.'''
# add a very small random value as tie breaker (a bit bad because this changes the score every time)
# so to make sure we get the same result every time, we seed it
#eps = 1e-15
#np.random.seed(sum(array.shape))
#array = array + eps*np.random.rand(array.shape[0],array.shape[1])
bin_array = np.zeros(array.shape)
if (task != 'multiclass.classification') or (array.shape[1]==1):
bin_array[array>=0.5] = 1
else:
sample_num=array.shape[0]
for i in range(sample_num):
j = np.argmax(array[i,:])
bin_array[i,j] = 1
return bin_array
def acc_stat (solution, prediction):
''' Return accuracy statistics TN, FP, TP, FN
Assumes that solution and prediction are binary 0/1 vectors.'''
# This uses floats so the results are floats
TN = sum(np.multiply((1-solution), (1-prediction)))
FN = sum(np.multiply(solution, (1-prediction)))
TP = sum(np.multiply(solution, prediction))
FP = sum(np.multiply((1-solution), prediction))
#print "TN =",TN
#print "FP =",FP
#print "TP =",TP
#print "FN =",FN
return (TN, FP, TP, FN)
def tiedrank(a):
''' Return the ranks (with base 1) of a list resolving ties by averaging.
This works for numpy arrays.'''
m=len(a)
# Sort a in ascending order (sa=sorted vals, i=indices)
i=a.argsort()
sa=a[i]
# Find unique values
uval=np.unique(a)
# Test whether there are ties
R=np.arange(m, dtype=float)+1 # Ranks with base 1
if len(uval)!=m:
# Average the ranks for the ties
oldval=sa[0]
newval=sa[0]
k0=0
for k in range(1,m):
newval=sa[k]
if newval==oldval:
# moving average
R[k0:k+1]=R[k-1]*(k-k0)/(k-k0+1)+R[k]/(k-k0+1)
else:
k0=k;
oldval=newval
# Invert the index
S=np.empty(m)
S[i]=R
return S
def mvmean(R, axis=0):
''' Moving average to avoid rounding errors. A bit slow, but...
Computes the mean along the given axis, except if this is a vector, in which case the mean is returned.
Does NOT flatten.'''
if len(R.shape)==0: return R
average = lambda x: reduce(lambda i, j: (0, (j[0]/(j[0]+1.))*i[1]+(1./(j[0]+1))*j[1]), enumerate(x))[1]
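    # The lambda above implements the running-mean recurrence
    # m_k = (k/(k+1)) * m_{k-1} + x_k/(k+1), i.e. it updates the mean incrementally
    # instead of summing everything first and dividing once.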
R=np.array(R)
if len(R.shape)==1: return average(R)
if axis==1:
return np.array(map(average, R))
else:
return np.array(map(average, R.transpose()))
# ======= All metrics used for scoring in the challenge ========
### REGRESSION METRICS (work on raw solution and prediction)
# These can be computed on all solutions and predictions (classification included)
def r2_metric(solution, prediction, task='regression'):
''' 1 - Mean squared error divided by variance '''
mse = mvmean((solution-prediction)**2)
var = mvmean((solution-mvmean(solution))**2)
score = 1 - mse / var
return mvmean(score)
def a_metric (solution, prediction, task='regression'):
''' 1 - Mean absolute error divided by mean absolute deviation '''
mae = mvmean(np.abs(solution-prediction)) # mean absolute error
mad = mvmean(np.abs(solution-mvmean(solution))) # mean absolute deviation
score = 1 - mae / mad
return mvmean(score)
### END REGRESSION METRICS
### CLASSIFICATION METRICS (work on solutions in {0, 1} and predictions in [0, 1])
# These can be computed on regression solutions/predictions only after running normalize_array
def bac_metric (solution, prediction, task='binary.classification'):
''' Compute the normalized balanced accuracy. The binarization and
the normalization differ for the multi-label and multi-class case. '''
label_num = solution.shape[1]
score = np.zeros(label_num)
bin_prediction = binarize_predictions(prediction, task)
[tn,fp,tp,fn] = acc_stat(solution, bin_prediction)
# Bounding to avoid division by 0
eps = 1e-15
tp = sp.maximum (eps, tp)
pos_num = sp.maximum (eps, tp+fn)
tpr = tp / pos_num # true positive rate (sensitivity)
if (task != 'multiclass.classification') or (label_num==1):
tn = sp.maximum (eps, tn)
neg_num = sp.maximum (eps, tn+fp)
tnr = tn / neg_num # true negative rate (specificity)
bac = 0.5*(tpr + tnr)
base_bac = 0.5 # random predictions for binary case
else:
bac = tpr
base_bac = 1./label_num # random predictions for multiclass case
bac = mvmean(bac) # average over all classes
# Normalize: 0 for random, 1 for perfect
score = (bac - base_bac) / sp.maximum(eps, (1 - base_bac))
return score
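# Illustrative check (binary task): with solution [[1],[1],[0],[0]] and prediction
# [[.9],[.8],[.3],[.1]], binarization at 0.5 gives tpr = tnr = 1, so the normalized
# score is 1; predicting 0.5 everywhere binarizes to all-positive and scores ~0.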
def pac_metric (solution, prediction, task='binary.classification'):
''' Probabilistic Accuracy based on log_loss metric.
We assume the solution is in {0, 1} and prediction in [0, 1].
Otherwise, run normalize_array.'''
debug_flag=False
[sample_num, label_num] = solution.shape
if label_num==1: task='binary.classification'
eps = 1e-15
the_log_loss = log_loss(solution, prediction, task)
# Compute the base log loss (using the prior probabilities)
pos_num = 1.* sum(solution) # float conversion!
frac_pos = pos_num / sample_num # prior proba of positive class
the_base_log_loss = prior_log_loss(frac_pos, task)
# Alternative computation of the same thing (slower)
# Should always return the same thing except in the multi-label case
# For which the analytic solution makes more sense
if debug_flag:
base_prediction = np.empty(prediction.shape)
for k in range(sample_num): base_prediction[k,:] = frac_pos
base_log_loss = log_loss(solution, base_prediction, task)
diff = np.array(abs(the_base_log_loss-base_log_loss))
if len(diff.shape)>0: diff=max(diff)
if(diff)>1e-10:
print('Arrggh {} != {}'.format(the_base_log_loss,base_log_loss))
# Exponentiate to turn into an accuracy-like score.
# In the multi-label case, we need to average AFTER taking the exp
# because it is an NL operation
pac = mvmean(np.exp(-the_log_loss))
base_pac = mvmean(np.exp(-the_base_log_loss))
# Normalize: 0 for random, 1 for perfect
score = (pac - base_pac) / sp.maximum(eps, (1 - base_pac))
return score
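# In short: pac = mean(exp(-cross_entropy)), rescaled so that predicting the class
# prior scores ~0 and a perfect prediction scores 1.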
def f1_metric (solution, prediction, task='binary.classification'):
''' Compute the normalized f1 measure. The binarization differs
for the multi-label and multi-class case.
A non-weighted average over classes is taken.
The score is normalized.'''
label_num = solution.shape[1]
score = np.zeros(label_num)
bin_prediction = binarize_predictions(prediction, task)
[tn,fp,tp,fn] = acc_stat(solution, bin_prediction)
# Bounding to avoid division by 0
eps = 1e-15
true_pos_num = sp.maximum (eps, tp+fn)
found_pos_num = sp.maximum (eps, tp+fp)
tp = sp.maximum (eps, tp)
tpr = tp / true_pos_num # true positive rate (recall)
ppv = tp / found_pos_num # positive predictive value (precision)
arithmetic_mean = 0.5 * sp.maximum (eps, tpr+ppv)
# Harmonic mean:
f1 = tpr*ppv/arithmetic_mean
# Average over all classes
f1 = mvmean(f1)
# Normalize: 0 for random, 1 for perfect
if (task != 'multiclass.classification') or (label_num==1):
# How to choose the "base_f1"?
# For the binary/multilabel classification case, one may want to predict all 1.
# In that case tpr = 1 and ppv = frac_pos. f1 = 2 * frac_pos / (1+frac_pos)
# frac_pos = mvmean(solution.ravel())
# base_f1 = 2 * frac_pos / (1+frac_pos)
# or predict random values with probability 0.5, in which case
# base_f1 = 0.5
# the first solution is better only if frac_pos > 1/3.
# The solution in which we predict according to the class prior frac_pos gives
# f1 = tpr = ppv = frac_pos, which is worse than 0.5 if frac_pos<0.5
# So, because the f1 score is used if frac_pos is small (typically <0.1)
# the best is to assume that base_f1=0.5
base_f1 = 0.5
# For the multiclass case, this is not possible (though it does not make much sense to
# use f1 for multiclass problems), so the best would be to assign values at random to get
# tpr=ppv=frac_pos, where frac_pos=1/label_num
else:
base_f1=1./label_num
score = (f1 - base_f1) / sp.maximum(eps, (1 - base_f1))
return score
def auc_metric(solution, prediction, task='binary.classification'):
    ''' Normalized Area under ROC curve (AUC).
    Return Gini index = 2*AUC-1 for binary classification problems.
    Should work for a vector of binary 0/1 (or -1/1) "solution" and any discriminant values
    for the predictions. If solution and prediction are not vectors, the AUCs
    of the columns of the matrices are computed and averaged (with no weight).
    The same function is used for all classification problems (in fact it handles well only
    the binary and multilabel classification problems).'''
#auc = metrics.roc_auc_score(solution, prediction, average=None)
# There is a bug in metrics.roc_auc_score: auc([1,0,0],[1e-10,0,0]) incorrect
label_num=solution.shape[1]
auc=np.empty(label_num)
for k in range(label_num):
r_ = tiedrank(prediction[:,k])
s_ = solution[:,k]
if sum(s_)==0: print('WARNING: no positive class example in class {}'.format(k+1))
npos = sum(s_==1)
nneg = sum(s_<1)
auc[k] = (sum(r_[s_==1]) - npos*(npos+1)/2) / (nneg*npos)
return 2*mvmean(auc)-1
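# Note: the per-class quantity above is the Mann-Whitney U statistic divided by
# npos*nneg, i.e. the probability that a random positive is ranked above a random
# negative; returning 2*AUC-1 yields the Gini index mentioned in the docstring.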
### END CLASSIFICATION METRICS
# ======= Specialized scores ========
# We run all of them for all tasks even though they don't make sense for some tasks
def nbac_binary_score(solution, prediction):
''' Normalized balanced accuracy for binary and multilabel classification '''
return bac_metric (solution, prediction, task='binary.classification')
def nbac_multiclass_score(solution, prediction):
    ''' Normalized balanced accuracy for multiclass classification '''
return bac_metric (solution, prediction, task='multiclass.classification')
def npac_binary_score(solution, prediction):
    ''' Normalized probabilistic accuracy for binary and multilabel classification '''
return pac_metric (solution, prediction, task='binary.classification')
def npac_multiclass_score(solution, prediction):
    ''' Normalized probabilistic accuracy for multiclass classification '''
return pac_metric (solution, prediction, task='multiclass.classification')
def f1_binary_score(solution, prediction):
    ''' Normalized F1 score for binary and multilabel classification '''
return f1_metric (solution, prediction, task='binary.classification')
def f1_multiclass_score(solution, prediction):
    ''' Normalized F1 score for multiclass classification '''
return f1_metric (solution, prediction, task='multiclass.classification')
def log_loss(solution, prediction, task = 'binary.classification'):
''' Log loss for binary and multiclass. '''
[sample_num, label_num] = solution.shape
eps = 1e-15
pred = np.copy(prediction) # beware: changes in prediction occur through this
sol = np.copy(solution)
if (task == 'multiclass.classification') and (label_num>1):
# Make sure the lines add up to one for multi-class classification
norma = np.sum(prediction, axis=1)
for k in range(sample_num):
pred[k,:] /= sp.maximum (norma[k], eps)
# Make sure there is a single label active per line for multi-class classification
sol = binarize_predictions(solution, task='multiclass.classification')
# For the base prediction, this solution is ridiculous in the multi-label case
# Bounding of predictions to avoid log(0),1/0,...
pred = sp.minimum (1-eps, sp.maximum (eps, pred))
# Compute the log loss
pos_class_log_loss = - mvmean(sol*np.log(pred), axis=0)
if (task != 'multiclass.classification') or (label_num==1):
# The multi-label case is a bunch of binary problems.
# The second class is the negative class for each column.
neg_class_log_loss = - mvmean((1-sol)*np.log(1-pred), axis=0)
log_loss = pos_class_log_loss + neg_class_log_loss
# Each column is an independent problem, so we average.
# The probabilities in one line do not add up to one.
# log_loss = mvmean(log_loss)
# print('binary {}'.format(log_loss))
        # In the multilabel case, the right thing is to AVERAGE, not sum
# We return all the scores so we can normalize correctly later on
else:
# For the multiclass case the probabilities in one line add up one.
log_loss = pos_class_log_loss
# We sum the contributions of the columns.
log_loss = np.sum(log_loss)
#print('multiclass {}'.format(log_loss))
return log_loss
def prior_log_loss(frac_pos, task = 'binary.classification'):
    ''' Baseline log loss. For multiple classes or labels, return the values for each column'''
eps = 1e-15
frac_pos_ = sp.maximum (eps, frac_pos)
if (task != 'multiclass.classification'): # binary case
frac_neg = 1-frac_pos
frac_neg_ = sp.maximum (eps, frac_neg)
pos_class_log_loss_ = - frac_pos * np.log(frac_pos_)
neg_class_log_loss_ = - frac_neg * np.log(frac_neg_)
base_log_loss = pos_class_log_loss_ + neg_class_log_loss_
# base_log_loss = mvmean(base_log_loss)
# print('binary {}'.format(base_log_loss))
        # In the multilabel case, the right thing is to AVERAGE, not sum
# We return all the scores so we can normalize correctly later on
else: # multiclass case
fp = frac_pos_ / sum(frac_pos_) # Need to renormalize the lines in multiclass case
# Only ONE label is 1 in the multiclass case active for each line
pos_class_log_loss_ = - frac_pos * np.log(fp)
base_log_loss = np.sum(pos_class_log_loss_)
return base_log_loss
# sklearn implementations for comparison
def log_loss_(solution, prediction):
return metrics.log_loss(solution, prediction)
def r2_score_(solution, prediction):
return metrics.r2_score(solution, prediction)
def a_score_(solution, prediction):
mad = float(mvmean(abs(solution-mvmean(solution))))
return 1 - metrics.mean_absolute_error(solution, prediction)/mad
def auc_score_(solution, prediction):
auc = metrics.roc_auc_score(solution, prediction, average=None)
return mvmean(auc)
### SOME I/O functions
def ls(filename):
return sorted(glob(filename))
def write_list(lst):
for item in lst:
swrite(item + "\n")
def mkdir(d):
if not os.path.exists(d):
os.makedirs(d)
def get_info (filename):
''' Get all information {attribute = value} pairs from the public.info file'''
info={}
with open (filename, "r") as info_file:
lines = info_file.readlines()
features_list = list(map(lambda x: tuple(x.strip("\'").split(" = ")), lines))
for (key, value) in features_list:
info[key] = value.rstrip().strip("'").strip(' ')
if info[key].isdigit(): # if we have a number, we want it to be an integer
info[key] = int(info[key])
return info
def show_io(input_dir, output_dir):
    ''' Show directory structure and inputs and outputs of the scoring program'''
swrite('\n=== DIRECTORIES ===\n\n')
# Show this directory
swrite("-- Current directory " + pwd() + ":\n")
write_list(ls('.'))
write_list(ls('./*'))
write_list(ls('./*/*'))
swrite("\n")
# List input and output directories
swrite("-- Input directory " + input_dir + ":\n")
write_list(ls(input_dir))
write_list(ls(input_dir + '/*'))
write_list(ls(input_dir + '/*/*'))
write_list(ls(input_dir + '/*/*/*'))
swrite("\n")
swrite("-- Output directory " + output_dir + ":\n")
write_list(ls(output_dir))
write_list(ls(output_dir + '/*'))
swrite("\n")
    # write metadata to stderr
swrite('\n=== METADATA ===\n\n')
swrite("-- Current directory " + pwd() + ":\n")
try:
metadata = yaml.load(open('metadata', 'r'))
for key,value in metadata.items():
swrite(key + ': ')
swrite(str(value) + '\n')
except:
swrite("none\n");
swrite("-- Input directory " + input_dir + ":\n")
try:
metadata = yaml.load(open(os.path.join(input_dir, 'metadata'), 'r'))
for key,value in metadata.items():
swrite(key + ': ')
swrite(str(value) + '\n')
swrite("\n")
except:
swrite("none\n");
def show_version(scoring_version):
''' Python version and library versions '''
swrite('\n=== VERSIONS ===\n\n')
# Scoring program version
swrite("Scoring program version: " + str(scoring_version) + "\n\n")
# Python version
swrite("Python version: " + version + "\n\n")
# Give information on the version installed
swrite("Versions of libraries installed:\n")
map(swrite, sorted(["%s==%s\n" % (i.key, i.version) for i in lib()]))
def show_platform():
''' Show information on platform'''
swrite('\n=== SYSTEM ===\n\n')
try:
linux_distribution = platform.linux_distribution()
except:
linux_distribution = "N/A"
swrite("""
dist: %s
linux_distribution: %s
system: %s
machine: %s
platform: %s
uname: %s
version: %s
mac_ver: %s
memory: %s
number of CPU: %s
""" % (
str(platform.dist()),
linux_distribution,
platform.system(),
platform.machine(),
platform.platform(),
platform.uname(),
platform.version(),
platform.mac_ver(),
psutil.virtual_memory(),
str(psutil.cpu_count())
))
def compute_all_scores(solution, prediction):
    ''' Compute all the scores and return them as a dict'''
missing_score = -0.999999
scoring = {'BAC (multilabel)':nbac_binary_score,
'BAC (multiclass)':nbac_multiclass_score,
'F1 (multilabel)':f1_binary_score,
'F1 (multiclass)':f1_multiclass_score,
'Regression ABS ':a_metric,
'Regression R2 ':r2_metric,
'AUC (multilabel)':auc_metric,
'PAC (multilabel)':npac_binary_score,
'PAC (multiclass)':npac_multiclass_score}
# Normalize/sanitize inputs
[csolution, cprediction] = normalize_array (solution, prediction)
solution = sanitize_array (solution); prediction = sanitize_array (prediction)
# Compute all scores
score_names = sorted(scoring.keys())
scores = {}
for key in score_names:
scoring_func = scoring[key]
try:
if key=='Regression R2 ' or key=='Regression ABS ':
scores[key] = scoring_func(solution, prediction)
else:
scores[key] = scoring_func(csolution, cprediction)
except:
scores[key] = missing_score
return scores
def write_scores(fp, scores):
''' Write scores to file opened under file pointer fp'''
for key in scores.keys():
fp.write("%s --> %s\n" % (key, scores[key]))
print(key + " --> " + str(scores[key]))
def show_all_scores(solution, prediction):
''' Compute and display all the scores for debug purposes'''
scores = compute_all_scores(solution, prediction)
for key in scores.keys():
print(key + " --> " + str(scores[key]))
############################### TEST PROGRAM ##########################################
if __name__=="__main__":
# This shows a bug in metrics.roc_auc_score
# print('\n\nBug in sklearn.metrics.roc_auc_score:')
# print('auc([1,0,0],[1e-10,0,0])=1')
# print('Correct (ours): ' +str(auc_metric(np.array([[1,0,0]]).transpose(),np.array([[1e-10,0,0]]).transpose())))
# print('Incorrect (sklearn): ' +str(metrics.roc_auc_score(np.array([1,0,0]),np.array([1e-10,0,0]))))
# This checks the binary and multi-class cases are well implemented
# In the 2-class case, all results should be identical, except for f1 because
# this is a score that is not symmetric in the 2 classes.
eps = 1e-15
print('\n\nBinary score verification:')
print('\n\n==========================')
sol0 = np.array([[1, 0],[1, 0],[0, 1],[0, 1]])
comment = ['PERFECT']
Pred = [sol0]
Sol = [sol0]
comment.append('ANTI-PERFECT, very bad for r2_score')
Pred.append(1-sol0)
Sol.append(sol0)
comment.append('UNEVEN PROBA, BUT BINARIZED VERSION BALANCED (bac and auc=0.5)')
    Pred.append(np.array([[0.7, 0.3],[0.4, 0.6],[0.49, 0.51],[0.2, 0.8]])) # here, since we have only 2 classes, pac is not 0 in the uni-column case
Sol.append(sol0)
comment.append('PROBA=0.5, TIES BROKEN WITH SMALL VALUE TO EVEN THE BINARIZED VERSION')
Pred.append(np.array([[0.5+eps, 0.5-eps],[0.5-eps, 0.5+eps],[0.5+eps, 0.5-eps],[0.5-eps, 0.5+eps]]))
Sol.append(sol0)
comment.append('PROBA=0.5, TIES NOT BROKEN (bad for f1 score)')
Pred.append(np.array([[0.5, 0.5],[0.5, 0.5],[0.5, 0.5],[0.5, 0.5]]))
Sol.append(sol0)
sol1 = np.array([[1, 0],[0, 1],[0, 1]])
comment.append('EVEN PROBA, but wrong PAC prior because uneven number of samples')
Pred.append(np.array([[0.5, 0.5],[0.5, 0.5],[0.5, 0.5]]))
Sol.append(sol1)
comment.append('Correct PAC prior; score generally 0. But 100% error on positive class because of binarization so f1 (1 col) is at its worst.')
p=len(sol1)
Pred.append(np.array([sum(sol1)*1./p]*p))
Sol.append(sol1)
comment.append('All positive')
Pred.append(np.array([[1, 1],[1, 1],[1, 1]]))
Sol.append(sol1)
comment.append('All negative')
Pred.append(np.array([[0, 0],[0, 0],[0, 0]]))
Sol.append(sol1)
for k in range(len(Sol)):
sol = Sol[k]
pred= Pred[k]
print('****** ({}) {} ******'.format(k, comment[k]))
print('------ 2 columns ------')
show_all_scores(sol, pred)
print('------ 1 column ------')
sol=np.array([sol[:,0]]).transpose()
pred=np.array([pred[:,0]]).transpose()
show_all_scores(sol, pred)
print('\n\nMulticlass score verification:')
print('\n\n==========================')
sol2 = np.array([[1, 0, 0],[0, 1, 0],[1, 0, 0], [1, 0, 0]])
comment = ['Three classes perfect']
Pred = [sol2]
Sol = [sol2]
comment.append('Three classes all wrong')
Pred.append(np.array([[0, 1, 0],[0, 0, 1],[0, 1, 0], [0, 0, 1]]))
Sol.append(sol2)
comment.append('Three classes equi proba')
    Pred.append(np.array([[1./3, 1./3, 1./3],[1./3, 1./3, 1./3],[1./3, 1./3, 1./3], [1./3, 1./3, 1./3]])) # float division so this also works under Python 2
Sol.append(sol2)
comment.append('Three classes some proba that do not add up')
Pred.append(np.array([[0.2, 0, 0.5],[0.8, 0.4, 0.1],[0.9, 0.1, 0.2], [0.7, 0.3, 0.3]]))
Sol.append(sol2)
comment.append('Three classes predict prior')
Pred.append(np.array([[ 0.75, 0.25, 0. ],[ 0.75, 0.25, 0. ],[ 0.75, 0.25, 0. ], [ 0.75, 0.25, 0. ]]))
Sol.append(sol2)
for k in range(len(Sol)):
sol = Sol[k]
pred= Pred[k]
print('****** ({}) {} ******'.format(k, comment[k]))
show_all_scores(sol, pred)
print('\n\nMulti-label score verification: 1) all identical labels')
print('\n\n=======================================================')
    print('\nIt is normal that for more than 2 labels the results are different for the multiclass scores.')
    print('\nBut they should be identical for the multilabel scores.')
num=2
sol=np.array([[1, 1, 1],[0, 0, 0],[0, 0, 0], [0, 0, 0]])
sol3 = sol[:,0:num]
if num==1:
sol3=np.array([sol3[:,0]]).transpose()
comment = ['{} labels perfect'.format(num)]
Pred = [sol3]
Sol = [sol3]
comment.append('All wrong, in the multi-label sense')
Pred.append(1-sol3)
Sol.append(sol3)
comment.append('All equi proba: 0.5')
sol=np.array([[0.5, 0.5, 0.5],[0.5, 0.5, 0.5],[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]])
if num==1:
Pred.append(np.array([sol[:,0]]).transpose())
else:
Pred.append(sol[:,0:num])
Sol.append(sol3)
comment.append('All equi proba, prior: 0.25')
sol=np.array([[ 0.25, 0.25, 0.25 ],[ 0.25, 0.25, 0.25 ],[ 0.25, 0.25, 0.25 ], [ 0.25, 0.25, 0.25 ]])
if num==1:
Pred.append(np.array([sol[:,0]]).transpose())
else:
Pred.append(sol[:,0:num])
Sol.append(sol3)
comment.append('Some proba')
sol=np.array([[0.2, 0.2, 0.2],[0.8, 0.8, 0.8],[0.9, 0.9, 0.9], [0.7, 0.7, 0.7]])
if num==1:
Pred.append(np.array([sol[:,0]]).transpose())
else:
Pred.append(sol[:,0:num])
Sol.append(sol3)
comment.append('Invert both solution and prediction')
if num==1:
Pred.append(np.array([sol[:,0]]).transpose())
else:
Pred.append(sol[:,0:num])
Sol.append(1-sol3)
for k in range(len(Sol)):
sol = Sol[k]
pred= Pred[k]
print('****** ({}) {} ******'.format(k, comment[k]))
show_all_scores(sol, pred)
print('\n\nMulti-label score verification:')
print('\n\n==========================')
sol4 = np.array([[1, 0, 0],[0, 1, 0],[0, 0, 1], [0, 0, 1]])
comment = ['Three labels perfect']
Pred = [sol4]
Sol = [sol4]
comment.append('Three classes all wrong, in the multi-label sense')
Pred.append(1-sol4)
Sol.append(sol4)
comment.append('Three classes equi proba')
    Pred.append(np.array([[1./3, 1./3, 1./3],[1./3, 1./3, 1./3],[1./3, 1./3, 1./3], [1./3, 1./3, 1./3]])) # float division so this also works under Python 2
Sol.append(sol4)
comment.append('Three classes some proba that do not add up')
Pred.append(np.array([[0.2, 0, 0.5],[0.8, 0.4, 0.1],[0.9, 0.1, 0.2], [0.7, 0.3, 0.3]]))
Sol.append(sol4)
comment.append('Three classes predict prior')
Pred.append(np.array([[ 0.25, 0.25, 0.5 ],[ 0.25, 0.25, 0.5 ],[ 0.25, 0.25, 0.5 ], [ 0.25, 0.25, 0.5 ]]))
Sol.append(sol4)
for k in range(len(Sol)):
sol = Sol[k]
pred= Pred[k]
print('****** ({}) {} ******'.format(k, comment[k]))
show_all_scores(sol, pred)
| mit |
bioinf-jku/SNNs | figure1/run.py | 1 | 6774 | #!/usr/bin/env python3
import os
from sklearn.preprocessing import StandardScaler
import biutils # used to load the dataset
import utils
def model(dataset, n_layers, n_hidden, activation, dropout_rate, use_batchnorm):
x_tr, y_tr, x_va, y_va = biutils.load_dataset(dataset)
s = StandardScaler()
s.fit(x_tr)
x_tr = s.transform(x_tr)
x_va = s.transform(x_va)
    if n_hidden == -1:  # use as many hidden units as there are input features
n_hidden = x_tr.shape[1]
if activation == 'relu':
act_fn = tf.nn.relu
init_scale = 2.0
elif activation == 'tanh':
act_fn = tf.nn.tanh
init_scale = 1.0
elif activation == 'selu':
act_fn = utils.selu
init_scale = 1.0
else:
assert False, "Unknown activation"
tf.reset_default_graph()
x = tf.placeholder(np.float32, [None, x_tr.shape[1]], name="x")
y = tf.placeholder(np.float32, [None, y_tr.shape[1]], name="y")
is_training = tf.placeholder_with_default(tf.constant(False, tf.bool), shape=[], name='is_training')
h = x
if dropout_rate > 0.0:
h = tf.layers.dropout(h, 0.2, training=is_training)
for i in range(n_layers):
s = np.sqrt(init_scale/h.get_shape().as_list()[1])
init = tf.random_normal_initializer(stddev=s)
h = tf.layers.dense(h, n_hidden, activation=act_fn, name='layer%d' % i, kernel_initializer=init)
if use_batchnorm:
h = tf.layers.batch_normalization(h, training=is_training)
if dropout_rate > 0.0:
h = tf.layers.dropout(h, dropout_rate, training=is_training)
with tf.variable_scope('output_layer') as scope:
o = tf.layers.dense(h, y_tr.shape[1], activation=None, name=scope)
scope.reuse_variables()
return (x_tr, y_tr, x_va, y_va), (x, y, is_training), o
def run(n_layers, n_hidden, n_epochs, learning_rate, dataset, activation, logdir_base='/tmp',
batch_size=64, dropout_rate=0.0, use_batchnorm=False):
ld = '%s%s_d%02d_h%d_l%1.0e_%s' % (activation,
'bn' if use_batchnorm else '',
n_layers, n_hidden, learning_rate,
utils.get_timestamp())
logdir = os.path.join(logdir_base, dataset, ld)
print(logdir)
dataset, variables, logits = model(dataset, n_layers, n_hidden, activation, dropout_rate, use_batchnorm)
x_tr, y_tr, x_va, y_va = dataset
x, y, is_training = variables
prob_op = tf.nn.softmax(logits)
loss_op = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
variables_to_train = tf.trainable_variables()
grads = optimizer.compute_gradients(loss_op, variables_to_train)
global_step = tf.train.get_global_step()
train_op = optimizer.apply_gradients(grads, global_step=global_step)
loss_val = tf.Variable(0.0, trainable=False, dtype=np.float32)
acc_op, acc_upd = tf.metrics.accuracy(tf.argmax(y, 1), tf.argmax(prob_op, 1), name='accuracy')
acc_tr_op = tf.summary.scalar('acc_tr', acc_op)
acc_va_op = tf.summary.scalar('acc_va', acc_op)
loss_tr_op = tf.summary.scalar('loss_tr', loss_val / x_tr.shape[0])
loss_va_op = tf.summary.scalar('loss_va', loss_val / x_va.shape[0])
metric_vars = [i for i in tf.local_variables() if i.name.split('/')[0] == 'accuracy']
reset_op = [tf.variables_initializer(metric_vars), loss_val.assign(0.0)]
loss_upd = loss_val.assign_add(tf.reduce_sum(loss_op))
smry_tr = tf.summary.merge([acc_tr_op, loss_tr_op])
smry_va = tf.summary.merge([acc_va_op, loss_va_op])
config = tf.ConfigProto(intra_op_parallelism_threads=2,
use_per_session_threads=True,
gpu_options = tf.GPUOptions(allow_growth=True)
)
with tf.Session(config=config) as sess:
log = tf.summary.FileWriter(logdir, sess.graph)
saver = tf.train.Saver(max_to_keep=100)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
fd_tr = {is_training: True}
for cur_epoch in range(n_epochs):
# get stats over whole training set
for fd in utils.generate_minibatches(batch_size, [x, y], [x_tr, y_tr], feed_dict=fd_tr, shuffle=False):
sess.run([acc_upd, loss_upd], feed_dict=fd)
log.add_summary(sess.run(smry_tr, feed_dict=fd), cur_epoch)
sess.run(reset_op)
# training
for fd in utils.generate_minibatches(batch_size, [x, y], [x_tr, y_tr], feed_dict=fd_tr):
sess.run([train_op], feed_dict=fd)
# validation
for fd in utils.generate_minibatches(batch_size, [x, y], [x_va, y_va], shuffle=False):
sess.run([acc_upd, loss_upd], feed_dict=fd)
smry, acc = sess.run([smry_va, acc_op])
log.add_summary(smry, cur_epoch)
sess.run(reset_op)
print("%3d: %3.3f" % (cur_epoch, acc), flush=True)
if cur_epoch % 250 == 0 and cur_epoch > 0:
saver.save(sess, os.path.join(logdir, 'model'), global_step=cur_epoch)
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-n", "--nhidden", type=int, help='hidden units (-1: use input size)', default=-1)
parser.add_argument("-d", "--depth", type=int, help='number of hidden layers', default=3)
parser.add_argument("-a", "--activation", choices=['relu', 'selu', 'tanh'], default='relu')
parser.add_argument("-b", "--batchsize", type=int, help='batch size', default=128)
parser.add_argument("-e", "--epochs", type=int, help='number of training epochs', default=30)
parser.add_argument("-l", "--learningrate", type=float, help='learning rate', default=1e-5)
parser.add_argument("-g", "--gpuid", type=str, help='GPU to use (leave blank for CPU only)', default="")
parser.add_argument("--batchnorm", help='use batchnorm', action="store_true")
parser.add_argument("--dropout", type=float, help='hidden dropout rate (implies input-dropout of 0.2)', default=0.0)
parser.add_argument("--dataset", type=str, help='name of dataset', default='mnist_bgimg')
parser.add_argument("--logdir", type=str, help='directory for TF logs and summaries', default="logs")
# by parsing the arguments already, we can bail out now instead of waiting
# for TF to load, in case the arguments aren't ok
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuid
import numpy as np
import tensorflow as tf
logdir_base = os.getcwd()
run(args.depth, args.nhidden, args.epochs, args.learningrate, args.dataset,
args.activation, args.logdir, args.batchsize, args.dropout, args.batchnorm)
| gpl-3.0 |
huongttlan/seaborn | seaborn/tests/test_axisgrid.py | 11 | 42805 | import warnings
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
import pandas.util.testing as tm
from .. import axisgrid as ag
from .. import rcmod
from ..palettes import color_palette
from ..distributions import kdeplot
from ..categorical import pointplot
from ..linearmodels import pairplot
from ..utils import categorical_order
rs = np.random.RandomState(0)
old_matplotlib = LooseVersion(mpl.__version__) < "1.4"
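# gridspec_kws is only supported on matplotlib >= 1.4, so the related tests
# below are gated on this flag via skipif.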
class TestFacetGrid(object):
df = pd.DataFrame(dict(x=rs.normal(size=60),
y=rs.gamma(4, size=60),
a=np.repeat(list("abc"), 20),
b=np.tile(list("mn"), 30),
c=np.tile(list("tuv"), 20),
d=np.tile(list("abcdefghij"), 6)))
def test_self_data(self):
g = ag.FacetGrid(self.df)
nt.assert_is(g.data, self.df)
plt.close("all")
def test_self_fig(self):
g = ag.FacetGrid(self.df)
nt.assert_is_instance(g.fig, plt.Figure)
plt.close("all")
def test_self_axes(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
for ax in g.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
plt.close("all")
def test_axes_array_size(self):
g1 = ag.FacetGrid(self.df)
nt.assert_equal(g1.axes.shape, (1, 1))
g2 = ag.FacetGrid(self.df, row="a")
nt.assert_equal(g2.axes.shape, (3, 1))
g3 = ag.FacetGrid(self.df, col="b")
nt.assert_equal(g3.axes.shape, (1, 2))
g4 = ag.FacetGrid(self.df, hue="c")
nt.assert_equal(g4.axes.shape, (1, 1))
g5 = ag.FacetGrid(self.df, row="a", col="b", hue="c")
nt.assert_equal(g5.axes.shape, (3, 2))
for ax in g5.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
plt.close("all")
def test_single_axes(self):
g1 = ag.FacetGrid(self.df)
nt.assert_is_instance(g1.ax, plt.Axes)
g2 = ag.FacetGrid(self.df, row="a")
with nt.assert_raises(AttributeError):
g2.ax
g3 = ag.FacetGrid(self.df, col="a")
with nt.assert_raises(AttributeError):
g3.ax
g4 = ag.FacetGrid(self.df, col="a", row="b")
with nt.assert_raises(AttributeError):
g4.ax
def test_col_wrap(self):
g = ag.FacetGrid(self.df, col="d")
nt.assert_equal(g.axes.shape, (1, 10))
nt.assert_is(g.facet_axis(0, 8), g.axes[0, 8])
g_wrap = ag.FacetGrid(self.df, col="d", col_wrap=4)
nt.assert_equal(g_wrap.axes.shape, (10,))
nt.assert_is(g_wrap.facet_axis(0, 8), g_wrap.axes[8])
nt.assert_equal(g_wrap._ncol, 4)
nt.assert_equal(g_wrap._nrow, 3)
with nt.assert_raises(ValueError):
g = ag.FacetGrid(self.df, row="b", col="d", col_wrap=4)
df = self.df.copy()
df.loc[df.d == "j"] = np.nan
g_missing = ag.FacetGrid(df, col="d")
nt.assert_equal(g_missing.axes.shape, (1, 9))
g_missing_wrap = ag.FacetGrid(df, col="d", col_wrap=4)
nt.assert_equal(g_missing_wrap.axes.shape, (9,))
plt.close("all")
def test_normal_axes(self):
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df)
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="c")
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="a", row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, g.axes[:-1, 1:].flat)
plt.close("all")
def test_wrapped_axes(self):
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df, col="a", col_wrap=2)
npt.assert_array_equal(g._bottom_axes,
g.axes[np.array([1, 2])].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:1].flat)
npt.assert_array_equal(g._left_axes, g.axes[np.array([0, 2])].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[np.array([1])].flat)
npt.assert_array_equal(g._inner_axes, null)
plt.close("all")
def test_figure_size(self):
g = ag.FacetGrid(self.df, row="a", col="b")
npt.assert_array_equal(g.fig.get_size_inches(), (6, 9))
g = ag.FacetGrid(self.df, row="a", col="b", size=6)
npt.assert_array_equal(g.fig.get_size_inches(), (12, 18))
g = ag.FacetGrid(self.df, col="c", size=4, aspect=.5)
npt.assert_array_equal(g.fig.get_size_inches(), (6, 4))
plt.close("all")
def test_figure_size_with_legend(self):
g1 = ag.FacetGrid(self.df, col="a", hue="c", size=4, aspect=.5)
npt.assert_array_equal(g1.fig.get_size_inches(), (6, 4))
g1.add_legend()
nt.assert_greater(g1.fig.get_size_inches()[0], 6)
g2 = ag.FacetGrid(self.df, col="a", hue="c", size=4, aspect=.5,
legend_out=False)
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))
g2.add_legend()
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))
plt.close("all")
def test_legend_data(self):
g1 = ag.FacetGrid(self.df, hue="a")
g1.map(plt.plot, "x", "y")
g1.add_legend()
palette = color_palette(n_colors=3)
nt.assert_equal(g1._legend.get_title().get_text(), "a")
a_levels = sorted(self.df.a.unique())
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(a_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), len(a_levels))
for label, level in zip(labels, a_levels):
nt.assert_equal(label.get_text(), level)
plt.close("all")
def test_legend_data_missing_level(self):
g1 = ag.FacetGrid(self.df, hue="a", hue_order=list("azbc"))
g1.map(plt.plot, "x", "y")
g1.add_legend()
b, g, r, p = color_palette(n_colors=4)
palette = [b, r, p]
nt.assert_equal(g1._legend.get_title().get_text(), "a")
a_levels = sorted(self.df.a.unique())
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(a_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), 4)
for label, level in zip(labels, list("azbc")):
nt.assert_equal(label.get_text(), level)
plt.close("all")
def test_get_boolean_legend_data(self):
self.df["b_bool"] = self.df.b == "m"
g1 = ag.FacetGrid(self.df, hue="b_bool")
g1.map(plt.plot, "x", "y")
g1.add_legend()
palette = color_palette(n_colors=2)
nt.assert_equal(g1._legend.get_title().get_text(), "b_bool")
b_levels = list(map(str, categorical_order(self.df.b_bool)))
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(b_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), len(b_levels))
for label, level in zip(labels, b_levels):
nt.assert_equal(label.get_text(), level)
plt.close("all")
def test_legend_options(self):
g1 = ag.FacetGrid(self.df, hue="b")
g1.map(plt.plot, "x", "y")
g1.add_legend()
def test_legendout_with_colwrap(self):
g = ag.FacetGrid(self.df, col="d", hue='b',
col_wrap=4, legend_out=False)
g.map(plt.plot, "x", "y", linewidth=3)
g.add_legend()
def test_subplot_kws(self):
g = ag.FacetGrid(self.df, subplot_kws=dict(axisbg="blue"))
for ax in g.axes.flat:
nt.assert_equal(ax.get_axis_bgcolor(), "blue")
@skipif(old_matplotlib)
def test_gridspec_kws(self):
ratios = [3, 1, 2]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios, height_ratios=ratios)
g = ag.FacetGrid(self.df, col='c', row='a', gridspec_kws=gskws)
# clear out all ticks
for ax in g.axes.flat:
ax.set_xticks([])
ax.set_yticks([])
g.fig.tight_layout()
widths, heights = np.meshgrid(sizes, sizes)
for n, ax in enumerate(g.axes.flat):
npt.assert_almost_equal(
ax.get_position().width,
widths.flatten()[n],
decimal=2
)
npt.assert_almost_equal(
ax.get_position().height,
heights.flatten()[n],
decimal=2
)
@skipif(old_matplotlib)
def test_gridspec_kws_col_wrap(self):
ratios = [3, 1, 2, 1, 1]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("always")
npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='d',
col_wrap=5, gridspec_kws=gskws)
@skipif(not old_matplotlib)
def test_gridspec_kws_old_mpl(self):
ratios = [3, 1, 2]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios, height_ratios=ratios)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("always")
npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='c',
row='a', gridspec_kws=gskws)
def test_data_generator(self):
g = ag.FacetGrid(self.df, row="a")
d = list(g.facet_data())
nt.assert_equal(len(d), 3)
tup, data = d[0]
nt.assert_equal(tup, (0, 0, 0))
nt.assert_true((data["a"] == "a").all())
tup, data = d[1]
nt.assert_equal(tup, (1, 0, 0))
nt.assert_true((data["a"] == "b").all())
g = ag.FacetGrid(self.df, row="a", col="b")
d = list(g.facet_data())
nt.assert_equal(len(d), 6)
tup, data = d[0]
nt.assert_equal(tup, (0, 0, 0))
nt.assert_true((data["a"] == "a").all())
nt.assert_true((data["b"] == "m").all())
tup, data = d[1]
nt.assert_equal(tup, (0, 1, 0))
nt.assert_true((data["a"] == "a").all())
nt.assert_true((data["b"] == "n").all())
tup, data = d[2]
nt.assert_equal(tup, (1, 0, 0))
nt.assert_true((data["a"] == "b").all())
nt.assert_true((data["b"] == "m").all())
g = ag.FacetGrid(self.df, hue="c")
d = list(g.facet_data())
nt.assert_equal(len(d), 3)
tup, data = d[1]
nt.assert_equal(tup, (0, 0, 1))
nt.assert_true((data["c"] == "u").all())
plt.close("all")
def test_map(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
g.map(plt.plot, "x", "y", linewidth=3)
lines = g.axes[0, 0].lines
nt.assert_equal(len(lines), 3)
line1, _, _ = lines
nt.assert_equal(line1.get_linewidth(), 3)
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_map_dataframe(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
plot = lambda x, y, data=None, **kws: plt.plot(data[x], data[y], **kws)
g.map_dataframe(plot, "x", "y", linestyle="--")
lines = g.axes[0, 0].lines
nt.assert_equal(len(lines), 3)
line1, _, _ = lines
nt.assert_equal(line1.get_linestyle(), "--")
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_set(self):
g = ag.FacetGrid(self.df, row="a", col="b")
xlim = (-2, 5)
ylim = (3, 6)
xticks = [-2, 0, 3, 5]
yticks = [3, 4.5, 6]
g.set(xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks)
for ax in g.axes.flat:
npt.assert_array_equal(ax.get_xlim(), xlim)
npt.assert_array_equal(ax.get_ylim(), ylim)
npt.assert_array_equal(ax.get_xticks(), xticks)
npt.assert_array_equal(ax.get_yticks(), yticks)
plt.close("all")
def test_set_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "a = a | b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "a = a | b = n")
nt.assert_equal(g.axes[1, 0].get_title(), "a = b | b = m")
# Test a provided title
g.set_titles("{row_var} == {row_name} \/ {col_var} == {col_name}")
nt.assert_equal(g.axes[0, 0].get_title(), "a == a \/ b == m")
nt.assert_equal(g.axes[0, 1].get_title(), "a == a \/ b == n")
nt.assert_equal(g.axes[1, 0].get_title(), "a == b \/ b == m")
# Test a single row
g = ag.FacetGrid(self.df, col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "b = n")
# test with dropna=False
g = ag.FacetGrid(self.df, col="b", hue="b", dropna=False)
g.map(plt.plot, 'x', 'y')
plt.close("all")
def test_set_titles_margin_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b", margin_titles=True)
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "b = n")
nt.assert_equal(g.axes[1, 0].get_title(), "")
# Test the row "titles"
nt.assert_equal(g.axes[0, 1].texts[0].get_text(), "a = a")
nt.assert_equal(g.axes[1, 1].texts[0].get_text(), "a = b")
# Test a provided title
g.set_titles(col_template="{col_var} == {col_name}")
nt.assert_equal(g.axes[0, 0].get_title(), "b == m")
nt.assert_equal(g.axes[0, 1].get_title(), "b == n")
nt.assert_equal(g.axes[1, 0].get_title(), "")
plt.close("all")
def test_set_ticklabels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
xlab = [l.get_text() + "h" for l in g.axes[1, 0].get_xticklabels()]
ylab = [l.get_text() for l in g.axes[1, 0].get_yticklabels()]
g.set_xticklabels(xlab)
g.set_yticklabels(rotation=90)
got_x = [l.get_text() + "h" for l in g.axes[1, 1].get_xticklabels()]
got_y = [l.get_text() for l in g.axes[0, 0].get_yticklabels()]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
x, y = np.arange(10), np.arange(10)
df = pd.DataFrame(np.c_[x, y], columns=["x", "y"])
g = ag.FacetGrid(df).map(pointplot, "x", "y")
g.set_xticklabels(step=2)
got_x = [int(l.get_text()) for l in g.axes[0, 0].get_xticklabels()]
npt.assert_array_equal(x[::2], got_x)
g = ag.FacetGrid(self.df, col="d", col_wrap=5)
g.map(plt.plot, "x", "y")
g.set_xticklabels(rotation=45)
g.set_yticklabels(rotation=75)
for ax in g._bottom_axes:
for l in ax.get_xticklabels():
nt.assert_equal(l.get_rotation(), 45)
for ax in g._left_axes:
for l in ax.get_yticklabels():
nt.assert_equal(l.get_rotation(), 75)
plt.close("all")
def test_set_axis_labels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
xlab = 'xx'
ylab = 'yy'
g.set_axis_labels(xlab, ylab)
got_x = [ax.get_xlabel() for ax in g.axes[-1, :]]
got_y = [ax.get_ylabel() for ax in g.axes[:, 0]]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
plt.close("all")
def test_axis_lims(self):
g = ag.FacetGrid(self.df, row="a", col="b", xlim=(0, 4), ylim=(-2, 3))
nt.assert_equal(g.axes[0, 0].get_xlim(), (0, 4))
nt.assert_equal(g.axes[0, 0].get_ylim(), (-2, 3))
plt.close("all")
def test_data_orders(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
nt.assert_equal(g.row_names, list("abc"))
nt.assert_equal(g.col_names, list("mn"))
nt.assert_equal(g.hue_names, list("tuv"))
nt.assert_equal(g.axes.shape, (3, 2))
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bca"),
col_order=list("nm"),
hue_order=list("vtu"))
nt.assert_equal(g.row_names, list("bca"))
nt.assert_equal(g.col_names, list("nm"))
nt.assert_equal(g.hue_names, list("vtu"))
nt.assert_equal(g.axes.shape, (3, 2))
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bcda"),
col_order=list("nom"),
hue_order=list("qvtu"))
nt.assert_equal(g.row_names, list("bcda"))
nt.assert_equal(g.col_names, list("nom"))
nt.assert_equal(g.hue_names, list("qvtu"))
nt.assert_equal(g.axes.shape, (4, 3))
plt.close("all")
def test_palette(self):
rcmod.set()
g = ag.FacetGrid(self.df, hue="c")
nt.assert_equal(g._colors, color_palette(n_colors=3))
g = ag.FacetGrid(self.df, hue="d")
nt.assert_equal(g._colors, color_palette("husl", 10))
g = ag.FacetGrid(self.df, hue="c", palette="Set2")
nt.assert_equal(g._colors, color_palette("Set2", 3))
dict_pal = dict(t="red", u="green", v="blue")
list_pal = color_palette(["red", "green", "blue"], 3)
g = ag.FacetGrid(self.df, hue="c", palette=dict_pal)
nt.assert_equal(g._colors, list_pal)
list_pal = color_palette(["green", "blue", "red"], 3)
g = ag.FacetGrid(self.df, hue="c", hue_order=list("uvt"),
palette=dict_pal)
nt.assert_equal(g._colors, list_pal)
plt.close("all")
def test_hue_kws(self):
kws = dict(marker=["o", "s", "D"])
g = ag.FacetGrid(self.df, hue="c", hue_kws=kws)
g.map(plt.plot, "x", "y")
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
def test_dropna(self):
df = self.df.copy()
hasna = pd.Series(np.tile(np.arange(6), 10), dtype=float)
hasna[hasna == 5] = np.nan
df["hasna"] = hasna
g = ag.FacetGrid(df, dropna=False, row="hasna")
nt.assert_equal(g._not_na.sum(), 60)
g = ag.FacetGrid(df, dropna=True, row="hasna")
nt.assert_equal(g._not_na.sum(), 50)
plt.close("all")
@classmethod
def teardown_class(cls):
"""Ensure that all figures are closed on exit."""
plt.close("all")
class TestPairGrid(object):
rs = np.random.RandomState(sum(map(ord, "PairGrid")))
df = pd.DataFrame(dict(x=rs.normal(size=80),
y=rs.randint(0, 4, size=(80)),
z=rs.gamma(3, size=80),
a=np.repeat(list("abcd"), 20),
b=np.repeat(list("abcdefgh"), 10)))
def test_self_data(self):
g = ag.PairGrid(self.df)
nt.assert_is(g.data, self.df)
plt.close("all")
def test_ignore_datelike_data(self):
df = self.df.copy()
df['date'] = pd.date_range('2010-01-01', periods=len(df), freq='d')
result = ag.PairGrid(self.df).data
expected = df.drop('date', axis=1)
tm.assert_frame_equal(result, expected)
plt.close("all")
def test_self_fig(self):
g = ag.PairGrid(self.df)
nt.assert_is_instance(g.fig, plt.Figure)
plt.close("all")
def test_self_axes(self):
g = ag.PairGrid(self.df)
for ax in g.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
plt.close("all")
def test_default_axes(self):
g = ag.PairGrid(self.df)
nt.assert_equal(g.axes.shape, (3, 3))
nt.assert_equal(g.x_vars, ["x", "y", "z"])
nt.assert_equal(g.y_vars, ["x", "y", "z"])
nt.assert_true(g.square_grid)
plt.close("all")
def test_specific_square_axes(self):
vars = ["z", "x"]
g = ag.PairGrid(self.df, vars=vars)
nt.assert_equal(g.axes.shape, (len(vars), len(vars)))
nt.assert_equal(g.x_vars, vars)
nt.assert_equal(g.y_vars, vars)
nt.assert_true(g.square_grid)
plt.close("all")
def test_specific_nonsquare_axes(self):
x_vars = ["x", "y"]
y_vars = ["z", "y", "x"]
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, x_vars)
nt.assert_equal(g.y_vars, y_vars)
nt.assert_true(not g.square_grid)
x_vars = ["x", "y"]
y_vars = "z"
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, list(x_vars))
nt.assert_equal(g.y_vars, list(y_vars))
nt.assert_true(not g.square_grid)
plt.close("all")
def test_specific_square_axes_with_array(self):
vars = np.array(["z", "x"])
g = ag.PairGrid(self.df, vars=vars)
nt.assert_equal(g.axes.shape, (len(vars), len(vars)))
nt.assert_equal(g.x_vars, list(vars))
nt.assert_equal(g.y_vars, list(vars))
nt.assert_true(g.square_grid)
plt.close("all")
def test_specific_nonsquare_axes_with_array(self):
x_vars = np.array(["x", "y"])
y_vars = np.array(["z", "y", "x"])
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, list(x_vars))
nt.assert_equal(g.y_vars, list(y_vars))
nt.assert_true(not g.square_grid)
plt.close("all")
def test_size(self):
g1 = ag.PairGrid(self.df, size=3)
npt.assert_array_equal(g1.fig.get_size_inches(), (9, 9))
g2 = ag.PairGrid(self.df, size=4, aspect=.5)
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 12))
g3 = ag.PairGrid(self.df, y_vars=["z"], x_vars=["x", "y"],
size=2, aspect=2)
npt.assert_array_equal(g3.fig.get_size_inches(), (8, 2))
plt.close("all")
def test_map(self):
vars = ["x", "y", "z"]
g1 = ag.PairGrid(self.df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(self.df, "a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
for k, k_level in enumerate("abcd"):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
plt.close("all")
def test_map_nonsquare(self):
x_vars = ["x"]
y_vars = ["y", "z"]
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
g.map(plt.scatter)
x_in = self.df.x
for i, i_var in enumerate(y_vars):
ax = g.axes[i, 0]
y_in = self.df[i_var]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
plt.close("all")
def test_map_lower(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_lower(plt.scatter)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.triu_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
def test_map_upper(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_upper(plt.scatter)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_map_diag(self):
g1 = ag.PairGrid(self.df)
g1.map_diag(plt.hist)
for ax in g1.diag_axes:
nt.assert_equal(len(ax.patches), 10)
g2 = ag.PairGrid(self.df)
g2.map_diag(plt.hist, bins=15)
for ax in g2.diag_axes:
nt.assert_equal(len(ax.patches), 15)
g3 = ag.PairGrid(self.df, hue="a")
g3.map_diag(plt.hist)
for ax in g3.diag_axes:
nt.assert_equal(len(ax.patches), 40)
plt.close("all")
@skipif(old_matplotlib)
def test_map_diag_and_offdiag(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_offdiag(plt.scatter)
g.map_diag(plt.hist)
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
def test_palette(self):
rcmod.set()
g = ag.PairGrid(self.df, hue="a")
nt.assert_equal(g.palette, color_palette(n_colors=4))
g = ag.PairGrid(self.df, hue="b")
nt.assert_equal(g.palette, color_palette("husl", 8))
g = ag.PairGrid(self.df, hue="a", palette="Set2")
nt.assert_equal(g.palette, color_palette("Set2", 4))
dict_pal = dict(a="red", b="green", c="blue", d="purple")
list_pal = color_palette(["red", "green", "blue", "purple"], 4)
g = ag.PairGrid(self.df, hue="a", palette=dict_pal)
nt.assert_equal(g.palette, list_pal)
list_pal = color_palette(["purple", "blue", "red", "green"], 4)
g = ag.PairGrid(self.df, hue="a", hue_order=list("dcab"),
palette=dict_pal)
nt.assert_equal(g.palette, list_pal)
plt.close("all")
def test_hue_kws(self):
kws = dict(marker=["o", "s", "d", "+"])
g = ag.PairGrid(self.df, hue="a", hue_kws=kws)
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
g = ag.PairGrid(self.df, hue="a", hue_kws=kws,
hue_order=list("dcab"))
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
plt.close("all")
@skipif(old_matplotlib)
def test_hue_order(self):
order = list("dcab")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
@skipif(old_matplotlib)
def test_hue_order_missing_level(self):
order = list("dcaeb")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
def test_nondefault_index(self):
df = self.df.copy().set_index("b")
vars = ["x", "y", "z"]
g1 = ag.PairGrid(df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(df, "a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
for k, k_level in enumerate("abcd"):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot(self):
vars = ["x", "y", "z"]
g = pairplot(self.df)
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot_reg(self):
vars = ["x", "y", "z"]
g = pairplot(self.df, kind="reg")
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot_kde(self):
vars = ["x", "y", "z"]
g = pairplot(self.df, diag_kind="kde")
for ax in g.diag_axes:
nt.assert_equal(len(ax.lines), 1)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot_markers(self):
vars = ["x", "y", "z"]
markers = ["o", "x", "s", "d"]
g = pairplot(self.df, hue="a", vars=vars, markers=markers)
nt.assert_equal(g.hue_kws["marker"], markers)
plt.close("all")
with nt.assert_raises(ValueError):
g = pairplot(self.df, hue="a", vars=vars, markers=markers[:-2])
@classmethod
def teardown_class(cls):
"""Ensure that all figures are closed on exit."""
plt.close("all")
class TestJointGrid(object):
rs = np.random.RandomState(sum(map(ord, "JointGrid")))
x = rs.randn(100)
y = rs.randn(100)
x_na = x.copy()
x_na[10] = np.nan
x_na[20] = np.nan
data = pd.DataFrame(dict(x=x, y=y, x_na=x_na))
def test_margin_grid_from_arrays(self):
g = ag.JointGrid(self.x, self.y)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
plt.close("all")
def test_margin_grid_from_series(self):
g = ag.JointGrid(self.data.x, self.data.y)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
plt.close("all")
def test_margin_grid_from_dataframe(self):
g = ag.JointGrid("x", "y", self.data)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
plt.close("all")
def test_margin_grid_axis_labels(self):
g = ag.JointGrid("x", "y", self.data)
xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()
nt.assert_equal(xlabel, "x")
nt.assert_equal(ylabel, "y")
g.set_axis_labels("x variable", "y variable")
xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()
nt.assert_equal(xlabel, "x variable")
nt.assert_equal(ylabel, "y variable")
plt.close("all")
def test_dropna(self):
g = ag.JointGrid("x_na", "y", self.data, dropna=False)
nt.assert_equal(len(g.x), len(self.x_na))
g = ag.JointGrid("x_na", "y", self.data, dropna=True)
nt.assert_equal(len(g.x), pd.notnull(self.x_na).sum())
plt.close("all")
def test_axlims(self):
lim = (-3, 3)
g = ag.JointGrid("x", "y", self.data, xlim=lim, ylim=lim)
nt.assert_equal(g.ax_joint.get_xlim(), lim)
nt.assert_equal(g.ax_joint.get_ylim(), lim)
nt.assert_equal(g.ax_marg_x.get_xlim(), lim)
nt.assert_equal(g.ax_marg_y.get_ylim(), lim)
def test_marginal_ticks(self):
g = ag.JointGrid("x", "y", self.data)
nt.assert_true(~len(g.ax_marg_x.get_xticks()))
nt.assert_true(~len(g.ax_marg_y.get_yticks()))
plt.close("all")
def test_bivariate_plot(self):
g = ag.JointGrid("x", "y", self.data)
g.plot_joint(plt.plot)
x, y = g.ax_joint.lines[0].get_xydata().T
npt.assert_array_equal(x, self.x)
npt.assert_array_equal(y, self.y)
plt.close("all")
def test_univariate_plot(self):
g = ag.JointGrid("x", "x", self.data)
g.plot_marginals(kdeplot)
_, y1 = g.ax_marg_x.lines[0].get_xydata().T
y2, _ = g.ax_marg_y.lines[0].get_xydata().T
npt.assert_array_equal(y1, y2)
plt.close("all")
def test_plot(self):
g = ag.JointGrid("x", "x", self.data)
g.plot(plt.plot, kdeplot)
x, y = g.ax_joint.lines[0].get_xydata().T
npt.assert_array_equal(x, self.x)
npt.assert_array_equal(y, self.x)
_, y1 = g.ax_marg_x.lines[0].get_xydata().T
y2, _ = g.ax_marg_y.lines[0].get_xydata().T
npt.assert_array_equal(y1, y2)
plt.close("all")
def test_annotate(self):
g = ag.JointGrid("x", "y", self.data)
rp = stats.pearsonr(self.x, self.y)
g.annotate(stats.pearsonr)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "pearsonr = %.2g; p = %.2g" % rp)
g.annotate(stats.pearsonr, stat="correlation")
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "correlation = %.2g; p = %.2g" % rp)
def rsquared(x, y):
return stats.pearsonr(x, y)[0] ** 2
r2 = rsquared(self.x, self.y)
g.annotate(rsquared)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "rsquared = %.2g" % r2)
template = "{stat} = {val:.3g} (p = {p:.3g})"
g.annotate(stats.pearsonr, template=template)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, template.format(stat="pearsonr",
val=rp[0], p=rp[1]))
plt.close("all")
def test_space(self):
g = ag.JointGrid("x", "y", self.data, space=0)
joint_bounds = g.ax_joint.bbox.bounds
marg_x_bounds = g.ax_marg_x.bbox.bounds
marg_y_bounds = g.ax_marg_y.bbox.bounds
nt.assert_equal(joint_bounds[2], marg_x_bounds[2])
nt.assert_equal(joint_bounds[3], marg_y_bounds[3])
@classmethod
def teardown_class(cls):
"""Ensure that all figures are closed on exit."""
plt.close("all")
| bsd-3-clause |
fruce-ki/utility_scripts | sequtilities.py | 1 | 53692 | #!/usr/bin/env python3
"""sequtilities.py
Author: Kimon Froussios
Date last revised: 17/02/2020
Library of utility functions relevant to sequencing tasks.
"""
import os, sys, argparse, re, csv
import pandas as pd
from collections import Counter
import Levenshtein as lev
import pysam
from Bio import SeqIO
import fileutilities as fu
import mylogs as ml
def collect_starFinalLogs(flist, all=False):
"""Combine the listed Log.final.out files into a pandas Dataframe.
File identifiers (filenames) will be trimmed at '_Log'.
Args:
flist: A list/FilesList of input files.
all(bool): Show all fields (False).
Returns:
pandas.DataFrame
"""
rows = None
if all:
# Still discarding some irrelevant stuff and blank columns.
rows = [2,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,22,23,24,25,27,28,29]
else:
rows = [4, 5, 8, 9, 23, 25, 27, 28, 29]
df = fu.get_crosspoints(flist, cols=[1], rows=rows, colSep=["\|"], header=False, index=0, merge=True)[0]
spaces = re.compile(r"\s{2,}|\t")
quotes = re.compile("\"")
# Clean up padding from cells.
for r in range(0, len(df.index)):
for c in range(0, len(df.columns)):
df.iloc[r,c] = spaces.sub("", str(df.iloc[r,c]))
# Clean up field descriptions.
index = df.index.values.tolist()
for i in range(0,len(index)):
index[i] = quotes.sub("", index[i])
index[i] = spaces.sub("", index[i])
df.index = index
# Clean up file identifiers from suffixes.
columns = df.columns.values.tolist()
for c in range(0, len(columns)):
columns[c] = str(columns[c]).split("_Log")[0]
df.columns = columns
# Transpose and add name to new index column.
df = pd.DataFrame.transpose(df)
df.index.name = "Name"
return df
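# Example usage (illustrative only; the log file names below are hypothetical):
#   df = collect_starFinalLogs(['sampleA_Log.final.out', 'sampleB_Log.final.out'])
#   df.to_csv('star_summary.tsv', sep="\t")
# Per the docstring, a plain list of paths is accepted in place of a FilesList.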
# Helper function
def gtf2pandas(flist):
"""Import GTF files as pandas.DataFrames.
Two columns will be added: the parent_id (gene) and target_id (transcript),
as extracted from the attributes column. The attributes column itself will remain as is.
The choice of column names was made for compatibility with sleuth.
Args:
flist: A list/FilesList of GTF files.
Returns:
[pandas.DataFrame]: List of dataframes, one per file.
"""
# Use my own parser, because it already handles input from multiple files or STDIN.
input = fu.get_columns(flist, cols=list(range(0,9)), colSep=["\t"], header=False, index=None, merge=False)
result = []
for gtf in input:
gtf.columns = ["chr", "source", "feature", "start", "stop", "score", "strand", "phase", "attributes"]
gtf["parent_id"] = gtf["attributes"].str.extract('gene_id \"?([^\";]+)', expand=False)
gtf["target_id"] = gtf["attributes"].str.extract('transcript_id \"?([^\";]+)', expand=False)
gtf.set_index(gtf["parent_id"], inplace=True)
result.append(gtf)
return result
def gtf2premrna(gtfs, filter=True):
"""Infer pre-mRNA from a GTF annotation.
Create a new GTF with the earliest start and latest finish associated with each gene_id.
Args:
gtfs: A list of GTF pandas.DataFrames, imported using gtf2pandas() from this library.
filter(bool): Remove pre-mRNA models for single-model single-exon genes.
This reduces model inflation/duplication.
Returns:
[pandas.DataFrame]: List of dataframes, one per file.
"""
result = []
for g in gtfs:
# Aggregate.
grped = g.groupby("parent_id")
gnum = len(grped)
pres = pd.DataFrame( data= {
"chr" : grped.head(1)["chr"],
"source" : ['based_on_Araport11'] * gnum,
"feature" : ['exon'] * gnum, # for the benefit of existing 3rd-party parsers
"start" : grped["start"].min(),
"stop" : grped["stop"].max(),
"score" : ['.'] * gnum,
"strand" : grped.head(1)["strand"],
"phase" : ['.'] * gnum,
"attributes" : grped.head(1)["parent_id"], # Temporary value.
"parent_id" : grped.head(1)["parent_id"]
})
# Filter
if (filter):
u = grped["target_id"].apply(lambda x: len(set(x))) # Number of models per gene.
e = grped["feature"].apply(lambda x: Counter(x)["exon"]) # Number of exons per gene.
idx = pres.index
for gid in idx:
# Drop pre-mRNA entries for single-model single-exon genes.
if (u[gid] == 1 and e[gid] == 1):
pres.drop(gid, axis=0, inplace=True)
# Format the attributes.
for gid in pres.index :
pres.loc[gid, "attributes"] = 'transcript_id \"' + gid + '_pre\"; gene_id \"' + gid + '\";'
# Order columns to match GTF specification.
result.append(pres[["chr","source", "feature", "start", "stop", "score", "strand", "phase", "attributes"]])
return result
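# Example usage (illustrative; 'annotation.gtf' is a hypothetical file name,
# and per the docstring a plain list of paths can stand in for a FilesList):
#   gtfs = gtf2pandas(['annotation.gtf'])
#   pre = gtf2premrna(gtfs, filter=True)
#   with open('premrna.gtf', 'w') as out:
#       for row in pre[0].itertuples(index=False):
#           out.write("\t".join(str(v) for v in row) + "\n")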
def samPatternStats(pattern, bam='-', bco=-4, bcl=4, literal=True, mmCap=2, wild='N', minFreq=0.01, filtered=False, nreads=None):
"""Find a pattern's positions and flanking sequences in a BAM.
Meant to be used to verify the position of a known spacer sequence and identify
the demultiplexing barcodes adjacent to it.
Args:
pattern(str): A sequence literal or regex pattern.
bam(str): A BAM file.
bco(int): How many positions before the tracer's start (-n) or after the
tracer's end (+n) is the barcode (-4)?
bcl(int): How many nucleotides long is the barcode (4)?
literal(bool): Is the pattern a literal sequence (True)? If so, mismatch patterns will also be created.
mmCap(int): Set upper limit to number of mismatch positions that are allowed (2).
wild(str): Character(s) used for unknown base calls, e.g. 'N'.
minFreq(float): Discard results occurring in less than this percentage of reads (default 0.01, i.e. 0.01% of reads).
filtered(bool): The return Counters are filtered to remove rare events.
nreads(int): How many reads to base the results on, for speed. (None => all of them)
Returns:
List of Counters:
[0] Total number of reads (int)
[1] Number of reads that matched the anchor (int)
[2] collections.Counter object for read lengths
[3] collections.Counter object with read count of tracer matches
[4] collections.Counter object with read count of barcodes found
Barcodes that would extend past either end of the read (due to a shifted tracer position) are not counted.
[5] collection.Counter object with wildcard count downstream of the adapter region.
"""
patlen = len(pattern) # if literal
Lengths = Counter()
reads = 0
matched = 0
Positions = Counter()
Barcodes = Counter()
Wilds = Counter()
samin = pysam.AlignmentFile(bam, 'rb', check_sq=False) # Checking for reference sequences in the header has to be disabled for unaligned BAM.
p = None
if not literal:
p = re.compile(pattern)
# Search the pattern line by line.
for line in samin:
reads = reads + 1
seq = line.query_sequence
seqlen = len(seq)
Lengths.update( ["\t".join(['Length', str(seqlen), '.', '.'])] )
whichMinDist = None
minDist = None
hit = None
if literal:
# Crawl along the sequence. When there is an exact match, this should break out after a few iterations. If there is any mismatch, it will have to go to the end.
minDist = patlen # Start with the largest possible hamming distance of all-mismatches.
for i in range(0, seqlen - patlen):
dist = lev.hamming(pattern, seq[i:(i+patlen)])
if dist < mmCap and dist < minDist:
minDist = dist
whichMinDist = i
if dist == 0:
break # Can't get any better.
else:
m = p.search(seq)
if m :
    whichMinDist = m.start() # No tie-breaker if anchor matches more than once. Just take the first. Provide more explicit patterns to reduce this occurrence.
hit = m.group(0)
if whichMinDist is not None: # if there is a match
patend = whichMinDist + patlen
if patend - 1 <= seqlen: # Make sure nothing hangs off the end
matched = matched + 1
if literal:
hit = seq[whichMinDist:patend]
Positions.update( ["\t".join(['Anchor', str(minDist), hit, str(whichMinDist + 1)])] )
# ie. Anchor 2 TTCCAGCATNGCTCTNAAAC 11
# Identify the barcode
if bco is not None and bcl is not None:
pos = patend - 1 + bco if bco > 0 else whichMinDist + bco
bcend = pos + bcl
if pos >= 0 and (bcend - 1) <= seqlen: # Make sure nothing hangs off the end
Barcodes.update( ["\t".join(['Barcode', '', seq[pos:bcend], str(pos + 1)])] )
# ie. Barcode . ACGT 7
# Count wildcards downstream.
guidePos = max(bcend if bcend else 0, patend) # whichever comes last, the barcode or the spacer.
waggr = 0
for w in wild: # allow more than one wildcard characters
waggr = waggr + seq.count(w, guidePos)
Wilds.update( ["\t".join(['Wildcards', str(waggr), '', ''])] )
if nreads is not None and reads == int(nreads): # Interrupt when the designated number of reads has been parsed
break
samin.close()
# Filter out rare events to keep output uncluttered.
if filtered:
for k in list(Lengths.keys()): # List gets all the values of the iterator before I edit the dict. That way the iterator doesn't crash.
if Lengths[k] / reads * 100 < minFreq:
del Lengths[k]
for k in list(Positions.keys()):
if Positions[k] / reads * 100 < minFreq:
del Positions[k]
for k in list(Barcodes.keys()):
if Barcodes[k] / reads * 100 < minFreq:
del Barcodes[k]
for k in list(Wilds.keys()):
if Wilds[k] / reads * 100 < minFreq:
del Wilds[k]
return [reads, matched, Lengths.most_common(), Positions.most_common(), Barcodes.most_common(), Wilds.most_common()]
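# Example usage (illustrative; 'lane1.bam' is a hypothetical file name):
#   reads, matched, lengths, anchors, barcodes, wilds = samPatternStats(
#       'TTCCAGCATAGCTCTTAAAC', bam='lane1.bam', bco=-4, bcl=4,
#       literal=True, nreads=100000)
#   print(matched / reads * 100, "% of reads matched the anchor")
# fqPatternStats() below takes the same arguments but reads a FASTQ instead of a BAM.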
def fqPatternStats(pattern, fastq, bco=-4, bcl=4, literal=True, mmCap=2, wild='N', minFreq=0.01, filtered=False, nreads=None):
"""Find a pattern's positions and flanking sequences in a FASTQ.
Meant to be used to verify the position of a known spacer sequence and identify
the demultiplexing barcodes adjacent to it.
Args:
pattern(str): A sequence literal or regex pattern.
fastq(str): A FASTQ file.
bco(int): How many positions before the tracer's start (-n) or after the
tracer's end (+n) is the barcode (-4)?
bcl(int): How many nucleotides long is the barcode (4)?
literal(bool): Is the pattern a literal sequence (True)? If so, mismatch patterns will also be created.
mmCap(int): Set upper limit to number of mismatch positions that are allowed (2).
wild(str): Character(s) used for unknown base calls, e.g. 'N'.
minFreq(float): Discard results occurring in less than this percentage of reads (default 0.01, i.e. 0.01% of reads).
filtered(bool): The return Counters are filtered to remove rare events.
nreads(int): How many reads to base the results on, for speed. (None => all of them)
Returns:
List of Counters:
[0] Total number of reads (int)
[1] Number of reads that matched the anchor (int)
[2] collections.Counter object for read lengths
[3] collections.Counter object with read count of tracer matches
[4] collections.Counter object with read count of barcodes found
Barcodes that would extend past either end of the read (due to a shifted tracer position) are not counted.
[5] collection.Counter object with wildcard count downstream of the adapter region.
"""
patlen = len(pattern) # if literal
Lengths = Counter()
reads = 0
matched = 0
Positions = Counter()
Barcodes = Counter()
Wilds = Counter()
with open(fastq, 'rt') as fqin:
p = None
if not literal:
p = re.compile(pattern)
# Search the pattern line by line.
for record in SeqIO.parse(fqin, 'fastq'):
reads = reads + 1
seq = str(record.seq)
seqlen = len(seq)
Lengths.update( ["\t".join(['Length', str(seqlen), '.', '.'])] )
whichMinDist = None
minDist = None
hit = None
if literal:
# Crawl along the sequence. When there is an exact match, this should break out after a few iterations. If there is any mismatch, it will have to go to the end.
minDist = patlen # Start with the largest possible hamming distance of all-mismatches.
for i in range(0, seqlen - patlen):
dist = lev.hamming(pattern, seq[i:(i+patlen)])
if dist < mmCap and dist < minDist:
minDist = dist
whichMinDist = i
if dist == 0:
break # Can't get any better.
else:
m = p.search(seq)
if m :
whichMinDist = m.start() # No tie-breaker if anchor matches more than once. Just take the first. Provide more explicit patterns to reduce this occurrence.
hit = m.group(0)
if whichMinDist is not None: # if there is a match
patend = whichMinDist + patlen
if patend - 1 <= seqlen: # Make sure nothing hangs off the end
matched = matched + 1
if literal:
hit = seq[whichMinDist:patend]
Positions.update( ["\t".join(['Anchor', str(minDist), hit, str(whichMinDist + 1)])] )
# ie. Anchor 2 TTCCAGCATNGCTCTNAAAC 11
# Identify the barcode
if bco is not None and bcl is not None:
pos = patend - 1 + bco if bco > 0 else whichMinDist + bco
bcend = pos + bcl
if pos >= 0 and (bcend - 1) <= seqlen: # Make sure nothing hangs off the end
Barcodes.update( ["\t".join(['Barcode', '', seq[pos:bcend], str(pos + 1)])] )
# ie. Barcode . ACGT 7
# Count wildcards downstream.
guidePos = max(bcend if bcend else 0, patend) # whichever comes last, the barcode or the spacer.
waggr = 0
for w in wild: # allow more than one wildcard characters
waggr = waggr + seq.count(w, guidePos)
Wilds.update( ["\t".join(['Wildcards', str(waggr), '', ''])] )
if nreads is not None and reads == nreads: # Interrupt when the designated number of reads has been parsed
break
# Filter out rare events to keep output uncluttered.
if filtered:
for k in list(Lengths.keys()): # List gets all the values of the iterator before I edit the dict. That way the iterator doesn't crash.
if Lengths[k] / reads * 100 < minFreq:
del Lengths[k]
for k in list(Positions.keys()):
if Positions[k] / reads * 100 < minFreq:
del Positions[k]
for k in list(Barcodes.keys()):
if Barcodes[k] / reads * 100 < minFreq:
del Barcodes[k]
for k in list(Wilds.keys()):
if Wilds[k] / reads * 100 < minFreq:
del Wilds[k]
return [reads, matched, Lengths.most_common(), Positions.most_common(), Barcodes.most_common(), Wilds.most_common()]
def encodeQuals(quals, offset=33):
"""Recode a numeric list into a Phred string"""
qual = ''
for q in quals:
if sys.version_info[0] < 3:
qual = qual + str(unichr(q + offset))
else:
qual = qual + str(chr(q + offset))
return(qual)
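# Example (illustrative): with the default Phred+33 offset,
#   encodeQuals([0, 10, 20, 30])  ->  '!+5?'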
def demuxWAnchor(bam, barcodes, outputdir='./process/fastq', tally=None, anchorSeq='TTCCAGCATAGCTCTTAAAC', anchorRegex=False, smm=2, bcOffset=-4, bcmm=1, guideLen=20, abort=30, qualOffset=33, unmatched=False, trimQC=False):
"""Demultiplexing BAM file with variable length 5' construct of barcode and spacer.
Uses an anchoring sequence as reference position to find demultiplexing barcodes.
Arbitrary spacers may be present before and between barcode and anchor.
The biological portion of the read MUST start immediately after the anchor or the barcode, which ever is last.
Args:
bam : Input BAM file. Single-end reads.
outputdir : Output directory where demultiplexed fastq files will be saved.
tally : File to write a tally of the reads assigned to each sample. (Default STDOUT)
bcOffset : Start position offset of the demultiplexing barcode, relative to the spacer.
Positive for downstream of the spacer end, negative for upstream of the spacer start.
Negative signs must be escaped.
guideLen : Guides will be clipped at this length.
anchorSeq : Spacer sequence to anchor.
anchorRegex : `anchorSeq` is a regex.
barcodes : Demultiplexing table, tab-delimited (lane, sample_name, barcode, anchor_pos).
`anchor_pos` is 1-based and refers to the start of the anchoring spacer, NOT the sample's barcode start!
If omitted, anchoring will fall back to regex search.
bcmm : Mismatches allowed in matching the demultiplexing barcodes.
smm : Mismatches allowed in matching the spacer sequence.
qualOffset : Base-call quality offset for conversion from pysam to fastq.
abort : Upper limit for how far into the read to search for the anchor, when no explicit positions are given in the barcodes file.
unmatched : Create a FASTQ file for all the reads that did not match the anchor or barcode within the given tolerances.
Otherwise they will simply be ignored.
trimQC : Create a partly-trimmed additional FASTQ (ending in .fqc) that includes the barcode and anchor untrimmed. Only what's upstream of them is trimmed.
In case you want to generate stats reports for the barcodes and anchor.
Returns:
True on completion
Raises:
ValueError
Exception
"""
# Clean up the lane name
lane = os.path.basename(bam)
if lane[(len(lane)-4):len(lane)] == '.bam':
lane = lane[0:(len(lane)-4)] # crop .bam suffix
# Demultiplexing dictionaries
demuxS1 = dict() # demuxS1[barcode] = sample
spacerP = list() # spacerP = [positions]
demuxB = dict() # demuxB[position] = [barcodes]
withPos = False # Explicit anchor positions provided
# Parse barcodes
with open(barcodes, "rt") as bcFile:
csvreader = csv.DictReader(bcFile, delimiter="\t")
for i, row in enumerate(csvreader):
if i == 0:
if not ("lane" in row.keys() or "sample_name" in row.keys() or "barcode" in row.keys()):
raise Exception("Error: 'lane', 'sample_name', or 'barcode' field is missing from the barcodes table.")
if 'anchor_pos' in row.keys():
withPos = True # Spacer start positions have been defined.
if 'position' in row.keys():
exit("The 'position' field is deprecated. It should now be named 'anchor_pos'.")
if row['lane'] == lane or row['lane'] == lane + '.bam': # Only interested in the details for the lane being demultiplexed by this instance of the script.
demuxS1[ row['barcode'] ] = row['sample_name']
if withPos:
pos = int(row['anchor_pos']) - 1 # 0-based indexing
if pos < 0:
raise ValueError(' '.join("Invalid barcode position definition for", row['lane'], row['barcode'], row['sample_name']))
if pos not in spacerP:
spacerP.append(pos)
if pos not in demuxB.keys():
demuxB[pos] = list()
demuxB[pos].append(row['barcode'])
else:
# Any position is now fair game
for pos in range(0, abort):
if pos not in spacerP:
spacerP.append(pos)
if pos not in demuxB.keys():
demuxB[pos] = list()
demuxB[pos].append(row['barcode'])
# Maybe the lane specifications did not match?
if len(demuxS1) == 0:
raise Exception("It looks like no info was parsed from the barcodes table. The 'lane' column of the barcodes table include " + lane + ' or ' + lane + '.bam ?')
# Open output files
fqOut = dict()
for barcode in demuxS1.keys():
try:
os.makedirs(outputdir)
except OSError: # path already exists. Hopefully you have permission to write where you want to, so that won't be the cause.
pass
file = lane + '_' + demuxS1[barcode] + '.fq'
fqOut[demuxS1[barcode]] = open(os.path.join(outputdir, file), "w", buffering=10000000) # 10MB
unknown = None
if unmatched:
unknown = open(os.path.join(outputdir, lane + '_unmatched.fq'), "w", buffering=10000000) # 10MB
fqcOut = dict()
unknownqc = None
if trimQC:
for barcode in demuxS1.keys():
file = lane + '_' + demuxS1[barcode] + '.fqc'
fqcOut[demuxS1[barcode]] = open(os.path.join(outputdir, file), "w", buffering=10000000) # 10MB
if unmatched:
unknownqc = open(os.path.join(outputdir, lane + '_unmatched.fqc'), "w", buffering=10000000) # 10MB
# Spacer pattern
anchor = re.compile(anchorSeq) # Pattern matching
anchorLen = len(anchorSeq) # Will be overwritten later if anchorSeq is a regex
# Statistics
counter = Counter()
# Parse SAM
samin = pysam.AlignmentFile(bam, "rb", check_sq=False)
for r in samin:
counter.update(['total'])
if counter['total'] % 10000000 == 0:
sys.stderr.write(str(lane + ' : ' + str(counter['total']) + " reads processed\n"))
sys.stderr.flush()
name = r.query_name
seq = r.query_sequence
quals = r.query_qualities
# Convert qualities to ASCII Phred
qual = encodeQuals(quals, qualOffset)
# Find the position of the anchor, within given mismatch tolerance
anchorFoundAt = None
for pos in spacerP: # Scan through predefined positions.
# This also covers the case where no positions were explicitly defined, as all the positions within the allowed range will have been generated instead.
if anchorRegex: # Just try to match the regex at the required position. Might be less efficient than anchor.search() when all positions are possible. But it's cleaner not creating a separate use case for it.
m = anchor.match(seq, pos)
if m:
anchorFoundAt = m.start()
anchorLen = m.end() - m.start()
break
else: # Calculate edit distance, not allowing indels in the anchor.
if lev.hamming(anchorSeq, seq[pos:(pos + anchorLen)]) <= smm:
anchorFoundAt = pos
break
# Demultiplex, trim
if anchorFoundAt is not None: # The anchor could be matched at the given positions with the given mismatch allowance
anchorEnd = anchorFoundAt + anchorLen
bcPos = anchorEnd - 1 + bcOffset if bcOffset > 0 else anchorFoundAt + bcOffset
bcFound = False
if bcPos >= 0:
# Scan through the barcodes expected at this anchor position
for bc in demuxB[anchorFoundAt]:
bcEnd = bcPos + len(bc)
if bcEnd <= len(seq) and lev.hamming(bc, seq[bcPos:bcEnd]) <= bcmm:
trimPos = max(bcEnd, anchorEnd) # Remember, bc can be either up- or down-stream of anchor
lentrim = trimPos + guideLen
if lentrim <= len(seq): # The guide is not cropped by read length
bcFound = True
# Print FASTQ entry
fqOut[demuxS1[bc]].write('@' + name + "\n" + seq[trimPos:lentrim] + "\n+\n" + qual[trimPos:lentrim] + "\n")
# Print partly trimmed FASTQ entry for FastQC
if trimQC:
qctrimPos = min(bcPos, anchorFoundAt)
fqcOut[demuxS1[bc]].write('@' + name + "\n" + seq[qctrimPos:lentrim] + "\n+\n" + qual[qctrimPos:lentrim] + "\n")
# Keep count
counter.update(['assigned', demuxS1[bc]])
if (not bcFound) and unmatched:
unknown.write('@' + name + "\n" + seq + "\n+\n" + qual + "\n")
counter.update(['BC unmatched'])
elif unmatched:
unknown.write('@' + name + "\n" + seq + "\n+\n" + qual + "\n")
counter.update(['Anchor unmatched'])
samin.close()
# Close output files
for file in fqOut.values():
file.close()
if unmatched:
unknown.close()
if trimQC:
for file in fqcOut.values():
file.close()
if unmatched:
unknownqc.close()
# Print tally
if tally:
lf = open(tally, "w")
for k,v in counter.most_common():
lf.write( "\t".join([lane, k, str(v)]) + "\n")
lf.close()
else:
for k,v in counter.most_common():
sys.stdout.write( "\t".join([lane, k, str(v)]) + "\n")
return(True)
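# Example usage (illustrative; file names are hypothetical):
#   demuxWAnchor('lane1.bam', 'barcodes.tsv', outputdir='process/fastq',
#                tally='lane1_tally.txt', anchorSeq='TTCCAGCATAGCTCTTAAAC',
#                smm=2, bcOffset=-4, bcmm=1, guideLen=20)
# barcodes.tsv is tab-delimited with columns: lane, sample_name, barcode
# and optionally anchor_pos (1-based start of the anchor).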
def demuxBC(bam, barcodes, outputdir='./process/fastq', tally=None, qualOffset=33, unmatched=False):
"""Demultiplexing BAM file according to BC and B2 tag fields.
No trimming is performed.
Keeping an index of all readname-barcode pairs currently takes up a lot of memory, try 10x the GB size of the BAM.
Args:
bam : Input BAM file. Single-end reads.
barcodes: Tabbed text file: lane, sample_name, barcode.
For dual barcodes: lane, sample_name, bar1code, bar2code.
outputdir : Output directory where demultiplexed BAM files will be saved.
tally : File to write a tally of the reads assigned to each sample. (Default STDOUT)
qualOffset : Base-call quality offset for conversion from pysam to fastq.
unmatched : Create a BAM file for all the reads whose barcode did not match any sample.
Otherwise they will simply be ignored.
Returns:
True on completion
Raises:
ValueError
Exception
"""
# Clean up the lane name
lane = os.path.basename(bam)
if lane[(len(lane)-4):len(lane)] == '.bam':
lane = lane[0:(len(lane)-4)] # crop .bam suffix
# Demultiplexing dictionaries
demuxS1 = dict() # demuxS1[barcode] = sample
demuxS2 = dict() # demuxS2[barcode] = sample
# Parse barcodes
dual = False
with open(barcodes, "rt") as bcFile:
csvreader = csv.DictReader(bcFile, delimiter="\t")
if not ("lane" in csvreader.fieldnames or "sample_name" in csvreader.fieldnames):
raise Exception("Error: 'lane' or 'sample_name' field is missing from the barcodes table.")
if 'barcode' in csvreader.fieldnames:
for i, row in enumerate(csvreader):
if row['lane'] == lane or row['lane'] == lane + '.bam': # Only interested in the details for the lane being demultiplexed by this instance of the script.
demuxS1[ row['barcode'] ] = row['sample_name']
elif 'bar2code' in csvreader.fieldnames and 'bar1code' in csvreader.fieldnames:
dual = True
for i, row in enumerate(csvreader):
if row['lane'] == lane or row['lane'] == lane + '.bam': # Only interested in the details for the lane being demultiplexed by this instance of the script.
demuxS1[ row['bar1code'] ] = row['sample_name']
demuxS2[ row['bar2code'] ] = row['sample_name']
else:
raise Exception("Could not determine barcode column(s).")
# Maybe the lane specifications did not match?
if len(demuxS1) == 0:
raise Exception("It looks like no info was parsed from the barcodes table. Does the 'lane' column of the barcodes table include " + lane + ' or ' + lane + '.bam ?')
# Open input. Need it as template for the output files.
samin = pysam.AlignmentFile(bam, "rb", check_sq=False)
# Open output files
try:
os.makedirs(outputdir)
except OSError: # path already exists. Hopefully you have permission to write where you want to.
pass
samOut = dict()
for barcode in demuxS1.keys():
file = demuxS1[barcode] + '_' + barcode + '.bam'
samOut[demuxS1[barcode]] = pysam.AlignmentFile(os.path.join(outputdir, file), "wb", template=samin)
unknown = None
if unmatched:
unknown = pysam.AlignmentFile(os.path.join(outputdir, lane + '_unmatched.bam'), "wb", template=samin)
# Parse SAM
seen = dict() # Keep track of seen fragment names (for paired-end, where only the first read may have a BC tag)
seen2 = dict()
counter = Counter() # Report the numbers of reads
for r in samin:
counter.update(['total'])
if counter['total'] % 10000000 == 0:
sys.stderr.write(str(lane + ' : ' + str(counter['total']) + " reads processed\n"))
sys.stderr.flush()
name = r.query_name
seq = r.query_sequence
quals = r.query_qualities
bc = None
b2 = None
if r.has_tag('BC'): # probably first read of the fragment.
bc = r.get_tag('BC')
seen[name] = bc
else:
bc = seen[name] # second/later read. Use BC from first read.
if dual:
if r.has_tag('B2'): # secondary barcode, from dual indexing
b2 = r.get_tag('B2')
seen2[name] = b2
else: # only try to fetch prexisting secondary barcode if dual indexing
b2 = seen2[name]
# Sample assigned to 2nd barcode, for comparison to 1st barcode.
sample = None
if dual:
for b in demuxS2.keys(): # Allow for the annotated barcode in the table to be truncated relative to the actual barcode recorded in the BAM. No mismatches.
if b in b2:
sample = demuxS2[b]
# Print BAM entry
for b in demuxS1: # Allow for the annotated barcode in the table to be truncated relative to the actual barcode recorded in the BAM. No mismatches.
if b in bc and ((not dual) or (dual and sample == demuxS1[b])): # in dual, both barcodes must point to the same sample, thus excluding barcode drifts.
samOut[demuxS1[b]].write(r)
# Keep count
counter.update(['assigned', demuxS1[b]])
break
else:
if unmatched:
unknown.write(r)
counter.update(['Barcode unmatched'])
samin.close()
# Close output files
for file in samOut.values():
file.close()
if unmatched:
unknown.close()
# Print tally
if tally:
lf = open(tally, "w")
for k,v in counter.most_common():
lf.write( "\t".join([lane, k, str(v)]) + "\n")
lf.close()
else:
for k,v in counter.most_common():
sys.stdout.write( "\t".join([lane, k, str(v)]) + "\n")
return(True)
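# Example usage (illustrative; file names are hypothetical):
#   demuxBC('lane1.bam', 'barcodes.tsv', outputdir='process/bam',
#           tally='lane1_tally.txt', unmatched=True)
# One BAM per sample is written as <sample_name>_<barcode>.bam in outputdir.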
def bed_from_regex(flist, rx, rc=False, name='match'):
"""Create a bedfile for the matches of a regular expression.
The whole sequence will be loaded to memory, twice if reverse complement is required.
Case-sensitive. Uses standard python re module, no support for nucleotide/aminoacid wildcards.
Args:
flist(FilesList): FASTA files.
rx(str): A regular expression.
rc(bool): Also search in the reverse complement? (Default False)
name(str): What to label the features in the track.
Returns:
[str]: List of BED-like rows.
"""
res = []
p = re.compile(rx)
for f, (myfile, myalias) in flist.enum():
with open(myfile, "rt") as fasta:
for record in SeqIO.parse(fasta, "fasta"):
for m in p.finditer( str(record.seq) ):
# Match coordinates are 0-based and end-non-inclusive, and BED wants them like that too.
res.append( '%s\t%d\t%d\t%s\t%d\t%c\t%d\t%d' % (record.id, m.start(), m.end(), m.group(0), 0, '+', m.start(), m.end()) )
if rc:
rcseq = str(record.seq.reverse_complement())
lenrc = len(rcseq)
for m in p.finditer(rcseq):
res.append( "%s\t%d\t%d\t%s\t%d\t%c\t%d\t%d" % (record.id, lenrc - m.end(), lenrc - m.start(), m.group(0), 0, '-' if not rc else '-', m.start(), m.end()) )
return(res)
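# Example usage (illustrative; assumes a fileutilities.FilesList can be built
# directly from a list of FASTA paths, and 'genome.fa' is a hypothetical file):
#   rows = bed_from_regex(fu.FilesList(['genome.fa']), 'GG(A|T)CC', rc=True)
#   with open('matches.bed', 'w') as out:
#       out.write("\n".join(rows) + "\n")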
def filter_bam_by_region(flist, regions, outfiles):
"""Select alignments that overlap the regions.
Whole alignments, not trimmed to the region (unlike samtools view)
Args:
flist(FilesList): BAM files.
regions[tuple(str,int,int)]: chr, start (inclusive), end (inclusive), 1-based.
outfiles[str]: Respective output files for the input files in flist.
"""
for i, (myfile, myalias) in flist.enum():
samin = pysam.AlignmentFile(myfile, 'rb')
samout = pysam.AlignmentFile(outfiles[i], 'wb', template=samin)
for record in samin:
    for r in regions:
        if r[0] != record.reference_name:
            continue
        # reference_start is 0-based inclusive, reference_end is 0-based exclusive.
        # The regions are 1-based inclusive, so convert before comparing.
        # Keep the whole alignment if it overlaps the region at all.
        if record.reference_start < r[2] and record.reference_end > (r[1] - 1):
            samout.write(record)
            break
samin.close()
samout.close()
def main(args):
# Organize arguments and usage help:
parser = argparse.ArgumentParser(description="Utility tasks relevant to sequencing.\
Be sure to read the pydoc as well, to get more details about each task. For the most \
part each runtime task is associated with a library function.")
# Input/Output.
parser.add_argument('INPUTTYPE', type=str, choices=['L','T','D','P'],
help=" Specify the type of the TARGETs: \
'T' = The actual input files. \
'L' = Text file(s) listing the input files. \
'P' = Get list of input files from STDIN pipe. \
'D' = Input data directly from STDIN pipe. \
('D' is compatible with only some of the functions)")
parser.add_argument('TARGET', type=str, nargs='*',
help=" The targets, space- or comma-separated. Usually files. \
Look into the specific task details below for special uses. \
Do not specify with INPUTTYPE 'P' or 'D'.")
parser.add_argument('-O','--out', type=str, nargs=3,
help=" Send individual outputs to individual files instead of \
merging them to STDOUT. Output files will be like \
<out[0]>/<out[1]>target<out[2]>")
# Parameters.
parser.add_argument('-L','--log', action='store_true',
help=" Log this command to ./commands.log.")
parser.add_argument('-c','--comments', action='store_true',
help=" Include commented info to STDOUT or files. (Default don't include)")
parser.add_argument('-C','--STDERRcomments', action="store_false",
help=" Do NOT show info in STDERR. (Default show)")
parser.add_argument('-v','--verbose', action="store_true",
help="Include more details/fields/etc in the output. Task-dependent.")
# Tasks.
parser.add_argument('--StarFinalLogs', type=str, choices=['tab','wiki'],
help="Combine multiple final summary mapping logs by STAR \
into a single text table of the specified format. Compact or verbose.")
parser.add_argument('--premRNA', type=str, choices=['a', 'f'],
help="Infer pre-mRNA coordinates from a GTF annotation. Returns a GTF \
                        of pre-mRNA transcripts, spanning the earliest start and latest end \
coordinates for each gene. ** Choice 'a' returns a pre-mRNA for every gene, whereas \
                        choice 'f' filters out genes whose only transcript model consists of a single exon \
** Compatible with 'D' as INPUTTYPE.")
parser.add_argument('--t2g', type=str, choices=['header', 'nohead'],
help="Extract transcript-gene ID pairs from a GTF file. The value determines\
whether to print a column header line or not.")
parser.add_argument('--samFltrRegs', type=str,
help="Filter a headerless SAM file stream according to a SAM header file \
that contains the desired group of regions. This works only with the 'D' INPUTTYPE. \
The input stream is typically the output of `samtools view <somefile.bam>`. \
The output is streamed to STDOUT, typically to be piped back to `samtools view -b`. \
The header file will be prepended to the output stream.")
parser.add_argument('--samPatternStats', type=str, nargs=6,
help="Number and location of matches of the pattern in the reads of BAM files. \
Arguments: [1] (str) anchor sequence, [2] (int) mismatches allowed in the anchor (use 'None' if anchor is regex),\
[3] (char) wildcard character(s) (like 'N' for unknown nucleotides),\
[4] (int) barcode offset (+n downstream of match end, \\-n upstream of match start, \
                        escaping the minus sign is important), [5] (int) barcode length, [6] Number of reads to inspect or 'all'.")
parser.add_argument('--fqPatternStats', type=str, nargs=6,
help="Number and location of matches of the pattern in the reads of FASTQ files. \
Arguments: [1] (str) anchor sequence, [2] (int) mismatches allowed in the anchor (use 'None' if anchor is regex),\
[3] (char) wildcard character(s) (like 'N' for unknown nucleotides),\
[4] (int) barcode offset (+n downstream of match end, \\-n upstream of match start, \
                        escaping the minus sign is important), [5] (int) barcode length, [6] Number of reads to inspect or 'all'.")
parser.add_argument('--demuxA', type=str, nargs=7,
help="Demultiplex a BAM using an anchor sequence to locate the barcodes. \
Arguments: [1] (str) barcodes file, [2] (str) anchor sequence (literal or regex),\
[3] (int) number of mismatches in anchor (use 'None' to indicate anchor is a regex),\
[4] (int) barcode offset (+n downstream of match end, \\-n upstream of match start, \
escaping the minus sign is important), [5] (int) number of mismatches in the barcodes,\
[6] (int) number of bases from read start beyond which to give up looking for the anchor,\
[7] base quality encoding offset.")
parser.add_argument('--demuxBC', type=str, nargs=2,
help="Demultiplex a BAM using the BC: tag field and a look-up table that matches these barcode values to sample names. Arguments: [1] (str) barcodes file, [2] (int) base quality encoding offset (probably 33). One output subdirectory per input bam. Use -O to specify the output destination.")
parser.add_argument('--regex2bed', type=str, nargs=3,
help="Create a bed track annotating the occurences of the specified regex in each strand of the TARGET sequences (FASTA files). [1] regex string, [2] also look in reverse complement yes/no, [3] feature name to display.")
parser.add_argument('--fltrBamReg', type=str, nargs='+',
help="Extract all alignments that overlap the given region. Unlike samtools view, the whole alignments will be returned, not the just the portions that overlap the regions. Regions given in chr:from-to format, inclusive of both ends, 1-based. Use -O to control output files.")
params = parser.parse_args(args)
# CALL DETAILS.
if params.log:
import mylogs
mylogs.log_command()
# if params.STDERRcomments:
# sys.stderr.write(ml.paramstring())
# INPUT.
flist = None
if params.INPUTTYPE == 'P':
# Read files list from STDIN
flist = fu.FilesList()
for line in sys.stdin:
name = line.rstrip("\n").split("\t")[0]
if name != "":
flist.append(name)
elif params.INPUTTYPE == 'L':
# Create the FilesList, by appending the contents of all provided lists.
flist = fu.FilesList().populate_from_files(params.TARGET)
elif params.INPUTTYPE == 'T':
# Create the FilesList by supplying a direct list of files.
flist = fu.FilesList(params.TARGET)
elif params.INPUTTYPE == 'D':
# Data will be read from STDIN. No files needed. Make an empty list.
# Not all functions will switch to STDIN given this. Several will simply do nothing.
flist = fu.FilesList()
else:
sys.exit(ml.errstring("Unknown INPUTTYPE."))
# OUTPUT.
outstream = sys.stdout
outfiles = None
outdir, outpref, outsuff = None, None, None
if params.out:
outdir = fu.expand_fpaths([params.out[0]])[0]
outpref = params.out[1]
outsuff = params.out[2]
outfiles = fu.make_names(flist.aliases, (outdir, outpref, outsuff))
### TASKS ###
# Combine STAR LOGS.
if params.StarFinalLogs:
# Do it.
df = collect_starFinalLogs(flist, all=params.verbose)
# Call details.
if params.comments:
sys.stdout.write(ml.paramstring())
# Formatting choice.
if params.StarFinalLogs == "wiki":
table = "^ " + df.index.name + " ^ " + " ^ ".join(df.columns.values.tolist()) + " ^\n"
for row in df.itertuples():
table += "| " + " | ".join(row) + " |\n"
sys.stdout.write(table)
else:
sys.stdout.write(df.to_csv(sep="\t", header=True, index=True))
# Done.
if params.STDERRcomments:
sys.stderr.write(ml.donestring("collecting STAR final logs"))
# Create PRE-MRNA GTF.
elif params.premRNA:
# Import data and calculate the result.
gtfs = gtf2pandas(flist)
result = gtf2premrna(gtfs, filter=(params.premRNA == 'f'))
# I need the for loop to iterate at least once. Relevant for STDIN input, since I have no input files listed then.
if flist == []:
flist.append("<STDIN>")
# Print the contents.
for i, (myfile, myalias) in flist.enum():
if outfiles:
# Send to individual file instead of STDOUT.
outstream = open(outfiles[i], 'w')
try:
result[i].to_csv(outstream, sep='\t', header=False, index=False, quoting=csv.QUOTE_NONE)
except IOError:
pass
finally:
if outfiles:
# Don't want to accidentally close STDOUT.
outstream.close()
if params.STDERRcomments:
sys.stderr.write(ml.donestring("creating pre-mRNA annotation"))
# Extract transcript and gene ID PAIRS from GTF
elif params.t2g:
# Import GTF.
gtfs = gtf2pandas(flist)
# I need the for loop to iterate at least once. Relevant for STDIN input, since I have no input files listed then.
if flist == []:
flist.append("<STDIN>")
# Print the contents.
hdr=None
if params.t2g == "header":
hdr = True
else:
hdr = False
for i, (myfile, myalias) in flist.enum():
if outfiles:
# Send to individual file instead of STDOUT.
outstream = open(outfiles[i], 'w')
gtfs[i].dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)
try:
gtfs[i].iloc[:,9:11].drop_duplicates().sort_values(["parent_id","target_id"]).to_csv(outstream, sep='\t', header=hdr, index=False, quoting=csv.QUOTE_NONE)
except IOError:
pass
finally:
if outfiles:
# Don't want to accidentally close STDOUT.
outstream.close()
if params.STDERRcomments:
sys.stderr.write(ml.donestring("extracting gene/transcript ID pairs."))
# FILTER a BAM file by REGION
elif params.samFltrRegs:
        if params.INPUTTYPE != 'D':
sys.exit("The only allowed INPUTTYPE is 'D' for streaming of header-less SAM content.")
# Get regions from SAM header file
regf = open(params.samFltrRegs, 'r')
regions = list()
p = re.compile('\sSN:(\S+)')
for line in regf:
sys.stdout.write(line)
m = p.search(line)
if m:
regions.append(m.group(1))
regf.close()
# Parse SAM stream and output only the matching lines.
p = re.compile('.+?\t\S+\t(\S+)')
for r in sys.stdin:
m = p.match(r)
if m and m.group(1) in regions:
sys.stdout.write(r)
if params.STDERRcomments:
sys.stderr.write(ml.donestring("filtering regions in SAM stream"))
# ADAPTER STATS from sam stream or fastq
# read length distribution, pattern position distribution, pattern match
elif params.samPatternStats or params.fqPatternStats:
# Do.
for i,f in enumerate(flist):
result = None
if params.samPatternStats:
rx = (params.samPatternStats[1] == "None")
result = samPatternStats(pattern=params.samPatternStats[0], bam=f, mmCap=(int(params.samPatternStats[1]) if not rx else 0),
bco=int(params.samPatternStats[3]), bcl=int(params.samPatternStats[4]),
literal=(not rx), wild=params.samPatternStats[2], filtered=False, nreads=(int(params.samPatternStats[5]) if params.samPatternStats[5] != "all" else None))
else:
rx = (params.fqPatternStats[1] == "None")
result = fqPatternStats(pattern=params.fqPatternStats[0], fastq=f, mmCap=(int(params.fqPatternStats[1]) if not rx else 0),
bco=int(params.fqPatternStats[3]), bcl=int(params.fqPatternStats[4]),
                                        literal=(not rx), wild=params.fqPatternStats[2], filtered=False, nreads=(int(params.fqPatternStats[5]) if params.fqPatternStats[5] != "all" else None))
if outfiles:
# Send to individual file instead of STDOUT.
outstream = open(outfiles[i], 'w')
# Print. The output should be an almost-tidy tab-delimited table.
outstream.write( "\t".join([os.path.basename(f), "Reads", '', '', '', str(result[0]), '(100% total)' + "\n"]) )
for v,c in result[2]:
outstream.write( "\t".join([os.path.basename(f), v, str(c), '(' + "{:.2f}".format(c / result[0] * 100) + '% total)' + "\n"]) )
outstream.write( "\t".join([os.path.basename(f), "Matched", '', '', '', str(result[1]), '(' + "{:.2f}".format(result[1] / result[0] * 100) + '% total)' + "\n"]) )
for v,c in result[3]:
outstream.write( "\t".join([os.path.basename(f), v, str(c), '(' + "{:.2f}".format(c / result[0] * 100) + '% total)' + "\n"]) )
for v,c in result[4]:
outstream.write( "\t".join([os.path.basename(f), v, str(c), '(' + "{:.2f}".format(c / result[0] * 100) + '% total)' + "\n"]) )
for v,c in result[5]:
outstream.write( "\t".join([os.path.basename(f), v, str(c), '(' + "{:.2f}".format(c / result[0] * 100) + '% total)' + "\n"]) )
if outfiles:
# Don't want to accidentally close STDOUT.
outstream.close()
if params.STDERRcomments:
sys.stderr.write(ml.donestring("pattern stats in " + f))
if params.STDERRcomments:
sys.stderr.write(ml.donestring("pattern stats in all BAMs"))
# DEMULTIPLEX BAM by ANCHOR sequence or BC tags
elif params.demuxA or params.demuxBC:
if not outfiles or len(outfiles) != len(flist):
exit("Insufficient output directories specified. Use -O to specify output directory pattern.")
if params.demuxA:
rx = (params.demuxA[2] == 'None')
for i,f in enumerate(flist):
demuxWAnchor(f, barcodes=params.demuxA[0], outputdir=outfiles[i], tally=None,
anchorSeq=params.demuxA[1], anchorRegex=rx, smm=(int(params.demuxA[2]) if not rx else 0),
bcOffset=int(params.demuxA[3]), bcmm=int(params.demuxA[4]),
abort=int(params.demuxA[5]), qualOffset=int(params.demuxA[6]), unmatched=False, trimQC=False)
if params.STDERRcomments:
sys.stderr.write(ml.donestring("anchored demultiplexing of " + f))
elif params.demuxBC:
for i,f in enumerate(flist):
demuxBC(f, barcodes=params.demuxBC[0], outputdir=outfiles[i], tally=None,
qualOffset=int(params.demuxBC[1]), unmatched=False)
if params.STDERRcomments:
sys.stderr.write(ml.donestring("tagged demultiplexing of " + f))
if params.STDERRcomments:
sys.stderr.write(ml.donestring("demultiplexing of all BAMs"))
# BED file of REGEX matches in FASTA sequences
elif params.regex2bed:
name = re.sub('\W', '_', params.regex2bed[2])
res = bed_from_regex(flist, rx = params.regex2bed[0], rc = params.regex2bed[1]=="yes", name = name)
print('track name=' + name + ' description="' + params.regex2bed[0] + '" useScore=0')
for line in res:
print(line)
# FILTER BAM by overlap to give REGIONS
elif params.fltrBamReg:
regions = list()
p = re.compile('(\w+):(\d+)[^0-9](\d+)')
for r in params.fltrBamReg:
m = p.search(r)
if m:
regions.append(( str(m.group(1)), int(m.group(2)), int(m.group(3)) ))
else:
sys.stderr.write(r + ' is not a valid region format\n')
exit(1)
filter_bam_by_region(flist, regions, outfiles)
# # All done.
# if params.STDERRcomments:
# sys.stderr.write(ml.donestring())
##### E X E C U T I O N #####
# Call main only if the module was executed directly.
if __name__ == "__main__":
main(sys.argv[1:])
sys.exit(0)
#EOF
| mit |
wangyum/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 30 | 4292 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using convolutional networks over characters for
DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.reshape(
tf.one_hot(features, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a ReLU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
roxyboy/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
dahlstrom-g/intellij-community | python/testData/debug/test_dataframe.py | 10 | 1388 | import pandas as pd
import numpy as np
df1 = pd.DataFrame({'row': [0, 1, 2],
'One_X': [1.1, 1.1, 1.1],
'Year': [2018, 2019, 2020],
'Winner': [True, False, True],
'Two_Y': [1.22, 1.22, 1.22]})
print(df1) ###line 8
df2 = pd.DataFrame({'row': [0, 1, 2],
'One_X': [1.1, 1.1, 1.1],
'One_Y': [1.2, 1.2, 1.2],
'Two_X': [1.11, 1.11, 1.11],
'Two_Y': [1.22, 1.22, 1.22],
'LABELS': ['A', 'B', 'C']})
print(df2) ##line 16
df3 = pd.DataFrame(data={'Province' : ['ON','QC','BC','AL','AL','MN','ON'],
'City' : ['Toronto','Montreal','Vancouver','Calgary','Edmonton','Winnipeg','Windsor'],
'Sales' : [13,6,16,8,4,3,1]})
table = pd.pivot_table(df3,values=['Sales'],index=['Province'],columns=['City'],aggfunc=np.sum,margins=True)
table.stack('City')
print(df3)
df4 = pd.DataFrame({'row': np.random.random(10000),
'One_X': np.random.random(10000),
'One_Y': np.random.random(10000),
'Two_X': np.random.random(10000),
'Two_Y': np.random.random(10000),
'LABELS': ['A'] * 10000})
print(df4) ##line 31
df5 = pd.DataFrame({'foo_%': np.random.random(10)})
print(df5) #line 34
| apache-2.0 |
SU-ECE-17-7/ibeis | ibeis/other/dbinfo.py | 1 | 84317 | # -*- coding: utf-8 -*-
"""
get_dbinfo is probably the only useful function in here
# This is not the cleanest module
"""
# TODO: ADD COPYRIGHT TAG
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import six
from ibeis import constants as const
import numpy as np
from collections import OrderedDict
from utool import util_latex
import functools
print, rrr, profile = ut.inject2(__name__, '[dbinfo]')
def print_qd_info(ibs, qaid_list, daid_list, verbose=False):
"""
SeeAlso:
ibs.print_annotconfig_stats(qaid_list, daid_list)
information for a query/database aid configuration
"""
bigstr = functools.partial(ut.truncate_str, maxlen=64, truncmsg=' ~TRUNC~ ')
print('[qd_info] * dbname = %s' % ibs.get_dbname())
print('[qd_info] * qaid_list = %s' % bigstr(str(qaid_list)))
print('[qd_info] * daid_list = %s' % bigstr(str(daid_list)))
print('[qd_info] * len(qaid_list) = %d' % len(qaid_list))
print('[qd_info] * len(daid_list) = %d' % len(daid_list))
print('[qd_info] * intersection = %r' % len(list(set(daid_list).intersection(set(qaid_list)))))
if verbose:
infokw = dict(with_contrib=False, with_agesex=False, with_header=False, verbose=False)
d_info_str = get_dbinfo(ibs, aid_list=daid_list, tag='DataInfo', **infokw)['info_str2']
q_info_str = get_dbinfo(ibs, aid_list=qaid_list, tag='QueryInfo', **infokw)['info_str2']
print(q_info_str)
print('\n')
print(d_info_str)
def sight_resight_prob(N_range, nvisit1, nvisit2, resight):
"""
https://en.wikipedia.org/wiki/Talk:Mark_and_recapture#Statistical_treatment
http://stackoverflow.com/questions/31439875/infinite-summation-in-python/31442749
"""
k, K, n = resight, nvisit1, nvisit2
from scipy.misc import comb
N_range = np.array(N_range)
def integers(start, blk_size=10000, pos=True, neg=False):
x = np.arange(start, start + blk_size)
while True:
if pos:
yield x
if neg:
yield -x - 1
x += blk_size
def converge_inf_sum(func, x_strm, eps=1e-5, axis=0):
# Can still be very slow
        total = np.sum(func(next(x_strm)), axis=axis)
#for x_blk in ut.ProgIter(x_strm, lbl='converging'):
for x_blk in x_strm:
diff = np.sum(func(x_blk), axis=axis)
total += diff
#error = abs(np.linalg.norm(diff))
#print('error = %r' % (error,))
if np.sqrt(diff.ravel().dot(diff.ravel())) <= eps:
# Converged
break
return total
numers = (comb(N_range - K, n - k) / comb(N_range, n))
@ut.memoize
def func(N_):
return (comb(N_ - K, n - k) / comb(N_, n))
denoms = []
for N in ut.ProgIter(N_range, lbl='denoms'):
x_strm = integers(start=(N + n - k), blk_size=100)
denom = converge_inf_sum(func, x_strm, eps=1e-3)
denoms.append(denom)
#denom = sum([func(N_) for N_ in range(N_start, N_start * 2)])
probs = numers / np.array(denoms)
return probs
def sight_resight_count(nvisit1, nvisit2, resight):
r"""
Lincoln Petersen Index
    The Lincoln-Petersen index is a method used to estimate the total number of
    individuals in a population given two independent sets of observations. The
    likelihood of a population size is a hypergeometric distribution given by
assuming a uniform sampling distribution.
Args:
nvisit1 (int): the number of individuals seen on visit 1.
nvisit2 (int): be the number of individuals seen on visit 2.
resight (int): the number of (matched) individuals seen on both visits.
Returns:
tuple: (pl_index, pl_error)
LaTeX:
\begin{equation}\label{eqn:lpifull}
L(\poptotal \given \nvisit_1, \nvisit_2, \resight) =
\frac{
\binom{\nvisit_1}{\resight}
\binom{\poptotal - \nvisit_1}{\nvisit_2 - \resight}
}{
\binom{\poptotal}{\nvisit_2}
}
\end{equation}
Assuming that $T$ has a uniform prior distribution, the maximum
likelihood estimation of population size given two visits to a
location is:
\begin{equation}\label{eqn:lpi}
\poptotal \approx
\frac{\nvisit_1 \nvisit_2}{\resight} \pm 1.96 \sqrt{\frac{{(\nvisit_1)}^2 (\nvisit_2) (\nvisit_2 - \resight)}{\resight^3}}
\end{equation}
References:
https://en.wikipedia.org/wiki/Mark_and_recapture
https://en.wikipedia.org/wiki/Talk:Mark_and_recapture#Statistical_treatment
https://mail.google.com/mail/u/0/#search/lincoln+peterse+n/14c6b50227f5209f
https://probabilityandstats.wordpress.com/tag/maximum-likelihood-estimate/
http://math.arizona.edu/~jwatkins/o-mle.pdf
CommandLine:
python -m ibeis.other.dbinfo sight_resight_count --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.other.dbinfo import * # NOQA
>>> nvisit1 = 100
>>> nvisit2 = 20
>>> resight = 10
>>> (pl_index, pl_error) = sight_resight_count(nvisit1, nvisit2, resight)
>>> result = '(pl_index, pl_error) = %s' % ut.repr2((pl_index, pl_error))
>>> pl_low = max(pl_index - pl_error, 1)
>>> pl_high = pl_index + pl_error
>>> print('pl_low = %r' % (pl_low,))
>>> print('pl_high = %r' % (pl_high,))
>>> print(result)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> import scipy, scipy.stats
>>> x = pl_index # np.array([10, 11, 12])
>>> k, N, K, n = resight, x, nvisit1, nvisit2
>>> #k, M, n, N = k, N, k, n # Wiki to SciPy notation
>>> #prob = scipy.stats.hypergeom.cdf(k, N, K, n)
>>> fig = pt.figure(1)
>>> fig.clf()
>>> N_range = np.arange(1, pl_high * 2)
>>> # Something seems to be off
>>> probs = sight_resight_prob(N_range, nvisit1, nvisit2, resight)
>>> pl_prob = sight_resight_prob([pl_index], nvisit1, nvisit2, resight)[0]
>>> pt.plot(N_range, probs, 'b-', label='probability of population size')
>>> pt.plt.title('nvisit1=%r, nvisit2=%r, resight=%r' % (
>>> nvisit1, nvisit2, resight))
>>> pt.plot(pl_index, pl_prob, 'rx', label='Lincoln Peterson Estimate')
>>> pt.plot([pl_low, pl_high], [pl_prob, pl_prob], 'gx-',
>>> label='Lincoln Peterson Error Bar')
>>> pt.legend()
>>> ut.show_if_requested()
"""
import math
try:
nvisit1 = float(nvisit1)
nvisit2 = float(nvisit2)
resight = float(resight)
pl_index = int(math.ceil( (nvisit1 * nvisit2) / resight ))
pl_error_num = float((nvisit1 ** 2) * nvisit2 * (nvisit2 - resight))
pl_error_dom = float(resight ** 3)
pl_error = int(math.ceil(1.96 * math.sqrt(pl_error_num / pl_error_dom)))
except ZeroDivisionError:
# pl_index = 'Undefined - Zero recaptured (k = 0)'
pl_index = 0
pl_error = 0
return pl_index, pl_error
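# Worked example of the arithmetic above, for illustration:
# nvisit1=100, nvisit2=20, resight=10 gives
#   pl_index = ceil(100 * 20 / 10) = 200
#   pl_error = ceil(1.96 * sqrt(100**2 * 20 * (20 - 10) / 10**3)) = ceil(87.65...) = 88
# i.e. an estimated population of 200 +/- 88 individuals.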
def dans_splits(ibs):
"""
python -m ibeis dans_splits --show
Example:
>>> # DISABLE_DOCTEST GGR
>>> from ibeis.other.dbinfo import * # NOQA
>>> import ibeis
>>> dbdir = '/media/danger/GGR/GGR-IBEIS'
>>> dbdir = dbdir if ut.checkpath(dbdir) else ut.truepath('~/lev/media/danger/GGR/GGR-IBEIS')
>>> ibs = ibeis.opendb(dbdir=dbdir, allow_newdir=False)
>>> import guitool as gt
>>> gt.ensure_qtapp()
>>> win = dans_splits(ibs)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> gt.qtapp_loop(qwin=win)
"""
#pair = 9262, 932
dans_aids = [26548, 2190, 9418, 29965, 14738, 26600, 3039, 2742, 8249,
20154, 8572, 4504, 34941, 4040, 7436, 31866, 28291,
16009, 7378, 14453, 2590, 2738, 22442, 26483, 21640, 19003,
13630, 25395, 20015, 14948, 21429, 19740, 7908, 23583, 14301,
26912, 30613, 19719, 21887, 8838, 16184, 9181, 8649, 8276,
14678, 21950, 4925, 13766, 12673, 8417, 2018, 22434, 21149,
14884, 5596, 8276, 14650, 1355, 21725, 21889, 26376, 2867,
6906, 4890, 21524, 6690, 14738, 1823, 35525, 9045, 31723,
2406, 5298, 15627, 31933, 19535, 9137, 21002, 2448,
32454, 12615, 31755, 20015, 24573, 32001, 23637, 3192, 3197,
8702, 1240, 5596, 33473, 23874, 9558, 9245, 23570, 33075,
23721, 24012, 33405, 23791, 19498, 33149, 9558, 4971,
34183, 24853, 9321, 23691, 9723, 9236, 9723, 21078,
32300, 8700, 15334, 6050, 23277, 31164, 14103,
21231, 8007, 10388, 33387, 4319, 26880, 8007, 31164,
32300, 32140]
is_hyrbid = [7123, 7166, 7157, 7158, ] # NOQA
needs_mask = [26836, 29742] # NOQA
justfine = [19862] # NOQA
annots = ibs.annots(dans_aids)
unique_nids = ut.unique(annots.nids)
grouped_aids = ibs.get_name_aids(unique_nids)
annot_groups = ibs._annot_groups(grouped_aids)
split_props = {'splitcase', 'photobomb'}
needs_tag = [len(split_props.intersection(ut.flatten(tags))) == 0 for tags in annot_groups.match_tags]
num_needs_tag = sum(needs_tag)
num_had_split = len(needs_tag) - num_needs_tag
print('num_had_split = %r' % (num_had_split,))
print('num_needs_tag = %r' % (num_needs_tag,))
#all_annot_groups = ibs._annot_groups(ibs.group_annots_by_name(ibs.get_valid_aids())[0])
#all_has_split = [len(split_props.intersection(ut.flatten(tags))) > 0 for tags in all_annot_groups.match_tags]
#num_nondan = sum(all_has_split) - num_had_split
#print('num_nondan = %r' % (num_nondan,))
from ibeis.algo.hots import graph_iden
from ibeis.viz import viz_graph2
import guitool as gt
import plottool as pt
pt.qt4ensure()
gt.ensure_qtapp()
aids_list = ut.compress(grouped_aids, needs_tag)
aids_list = [a for a in aids_list if len(a) > 1]
print('len(aids_list) = %r' % (len(aids_list),))
for aids in aids_list:
infr = graph_iden.AnnotInference(ibs, aids)
infr.initialize_graph()
win = viz_graph2.AnnotGraphWidget(infr=infr, use_image=False,
init_mode='rereview')
win.populate_edge_model()
win.show()
return win
assert False
def fix_splits_interaction(ibs):
"""
python -m ibeis fix_splits_interaction --show
Example:
>>> # DISABLE_DOCTEST GGR
>>> from ibeis.other.dbinfo import * # NOQA
>>> import ibeis
>>> dbdir = '/media/danger/GGR/GGR-IBEIS'
>>> dbdir = dbdir if ut.checkpath(dbdir) else ut.truepath('~/lev/media/danger/GGR/GGR-IBEIS')
>>> ibs = ibeis.opendb(dbdir=dbdir, allow_newdir=False)
>>> import guitool as gt
>>> gt.ensure_qtapp()
>>> win = fix_splits_interaction(ibs)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> gt.qtapp_loop(qwin=win)
"""
split_props = {'splitcase', 'photobomb'}
all_annot_groups = ibs._annot_groups(ibs.group_annots_by_name(ibs.get_valid_aids())[0])
all_has_split = [len(split_props.intersection(ut.flatten(tags))) > 0 for tags in all_annot_groups.match_tags]
tosplit_annots = ut.compress(all_annot_groups.annots_list, all_has_split)
tosplit_annots = ut.take(tosplit_annots, ut.argsort(ut.lmap(len, tosplit_annots)))[::-1]
if ut.get_argflag('--reverse'):
tosplit_annots = tosplit_annots[::-1]
print('len(tosplit_annots) = %r' % (len(tosplit_annots),))
aids_list = [a.aids for a in tosplit_annots]
from ibeis.algo.hots import graph_iden
from ibeis.viz import viz_graph2
import guitool as gt
import plottool as pt
pt.qt4ensure()
gt.ensure_qtapp()
for aids in ut.InteractiveIter(aids_list):
infr = graph_iden.AnnotInference(ibs, aids)
infr.initialize_graph()
win = viz_graph2.AnnotGraphWidget(infr=infr, use_image=False,
init_mode='rereview')
win.populate_edge_model()
win.show()
return win
#assert False
def split_analysis(ibs):
"""
CommandLine:
python -m ibeis.other.dbinfo split_analysis --show
python -m ibeis split_analysis --show
python -m ibeis split_analysis --show --good
Ignore:
# mount
sshfs -o idmap=user lev:/ ~/lev
# unmount
fusermount -u ~/lev
Example:
>>> # DISABLE_DOCTEST GGR
>>> from ibeis.other.dbinfo import * # NOQA
>>> import ibeis
>>> dbdir = '/media/danger/GGR/GGR-IBEIS'
>>> dbdir = dbdir if ut.checkpath(dbdir) else ut.truepath('~/lev/media/danger/GGR/GGR-IBEIS')
>>> ibs = ibeis.opendb(dbdir=dbdir, allow_newdir=False)
>>> import guitool as gt
>>> gt.ensure_qtapp()
>>> win = split_analysis(ibs)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> gt.qtapp_loop(qwin=win)
>>> #ut.show_if_requested()
"""
#nid_list = ibs.get_valid_nids(filter_empty=True)
import datetime
day1 = datetime.date(2016, 1, 30)
day2 = datetime.date(2016, 1, 31)
filter_kw = {
'multiple': None,
#'view': ['right'],
#'minqual': 'good',
'is_known': True,
'min_pername': 1,
}
aids1 = ibs.filter_annots_general(filter_kw=ut.dict_union(
filter_kw, {
'min_unixtime': ut.datetime_to_posixtime(ut.date_to_datetime(day1, 0.0)),
'max_unixtime': ut.datetime_to_posixtime(ut.date_to_datetime(day1, 1.0)),
})
)
aids2 = ibs.filter_annots_general(filter_kw=ut.dict_union(
filter_kw, {
'min_unixtime': ut.datetime_to_posixtime(ut.date_to_datetime(day2, 0.0)),
'max_unixtime': ut.datetime_to_posixtime(ut.date_to_datetime(day2, 1.0)),
})
)
all_aids = aids1 + aids2
all_annots = ibs.annots(all_aids)
print('%d annots on day 1' % (len(aids1)) )
print('%d annots on day 2' % (len(aids2)) )
print('%d annots overall' % (len(all_annots)) )
print('%d names overall' % (len(ut.unique(all_annots.nids))) )
nid_list, annots_list = all_annots.group(all_annots.nids)
REVIEWED_EDGES = True
if REVIEWED_EDGES:
aids_list = [annots.aids for annots in annots_list]
#aid_pairs = [annots.get_am_aidpairs() for annots in annots_list] # Slower
aid_pairs = ibs.get_unflat_am_aidpairs(aids_list) # Faster
else:
# ALL EDGES
aid_pairs = [annots.get_aidpairs() for annots in annots_list]
speeds_list = ibs.unflat_map(ibs.get_annotpair_speeds, aid_pairs)
import vtool as vt
max_speeds = np.array([vt.safe_max(s, nans=False) for s in speeds_list])
nan_idx = np.where(np.isnan(max_speeds))[0]
inf_idx = np.where(np.isinf(max_speeds))[0]
bad_idx = sorted(ut.unique(ut.flatten([inf_idx, nan_idx])))
ok_idx = ut.index_complement(bad_idx, len(max_speeds))
print('#nan_idx = %r' % (len(nan_idx),))
print('#inf_idx = %r' % (len(inf_idx),))
print('#ok_idx = %r' % (len(ok_idx),))
ok_speeds = max_speeds[ok_idx]
ok_nids = ut.take(nid_list, ok_idx)
ok_annots = ut.take(annots_list, ok_idx)
sortx = np.argsort(ok_speeds)[::-1]
sorted_speeds = np.array(ut.take(ok_speeds, sortx))
sorted_annots = np.array(ut.take(ok_annots, sortx))
sorted_nids = np.array(ut.take(ok_nids, sortx)) # NOQA
sorted_speeds = np.clip(sorted_speeds, 0, 100)
#idx = vt.find_elbow_point(sorted_speeds)
#EXCESSIVE_SPEED = sorted_speeds[idx]
# http://www.infoplease.com/ipa/A0004737.html
# http://www.speedofanimals.com/animals/zebra
#ZEBRA_SPEED_MAX = 64 # km/h
#ZEBRA_SPEED_RUN = 50 # km/h
ZEBRA_SPEED_SLOW_RUN = 20 # km/h
#ZEBRA_SPEED_FAST_WALK = 10 # km/h
#ZEBRA_SPEED_WALK = 7 # km/h
MAX_SPEED = ZEBRA_SPEED_SLOW_RUN
#MAX_SPEED = ZEBRA_SPEED_WALK
#MAX_SPEED = EXCESSIVE_SPEED
flags = sorted_speeds > MAX_SPEED
flagged_ok_annots = ut.compress(sorted_annots, flags)
inf_annots = ut.take(annots_list, inf_idx)
flagged_annots = inf_annots + flagged_ok_annots
print('MAX_SPEED = %r km/h' % (MAX_SPEED,))
print('%d annots with infinite speed' % (len(inf_annots),))
print('%d annots with large speed' % (len(flagged_ok_annots),))
print('Marking all pairs of annots above the threshold as non-matching')
from ibeis.algo.hots import graph_iden
import networkx as nx
progkw = dict(freq=1, bs=True, est_window=len(flagged_annots))
bad_edges_list = []
good_edges_list = []
for annots in ut.ProgIter(flagged_annots, lbl='flag speeding names', **progkw):
edge_to_speeds = annots.get_speeds()
bad_edges = [edge for edge, speed in edge_to_speeds.items() if speed > MAX_SPEED]
good_edges = [edge for edge, speed in edge_to_speeds.items() if speed <= MAX_SPEED]
bad_edges_list.append(bad_edges)
good_edges_list.append(good_edges)
all_bad_edges = ut.flatten(bad_edges_list)
good_edges_list = ut.flatten(good_edges_list)
print('num_bad_edges = %r' % (len(ut.flatten(bad_edges_list)),))
print('num_bad_edges = %r' % (len(ut.flatten(good_edges_list)),))
if 1:
from ibeis.viz import viz_graph2
import guitool as gt
gt.ensure_qtapp()
if ut.get_argflag('--good'):
print('Looking at GOOD (no speed problems) edges')
aid_pairs = good_edges_list
else:
print('Looking at BAD (speed problems) edges')
aid_pairs = all_bad_edges
aids = sorted(list(set(ut.flatten(aid_pairs))))
infr = graph_iden.AnnotInference(ibs, aids, verbose=False)
infr.initialize_graph()
# Use random scores to randomize sort order
rng = np.random.RandomState(0)
scores = (-rng.rand(len(aid_pairs)) * 10).tolist()
infr.graph.add_edges_from(aid_pairs)
if True:
#import utool
#utool.embed()
edge_sample_size = 250
pop_nids = ut.unique(ibs.get_annot_nids(ut.unique(ut.flatten(aid_pairs))))
sorted_pairs = ut.sortedby(aid_pairs, scores)[::-1][0:edge_sample_size]
sorted_nids = ibs.get_annot_nids(ut.take_column(sorted_pairs, 0))
sample_size = len(ut.unique(sorted_nids))
am_rowids = ibs.get_annotmatch_rowid_from_undirected_superkey(*zip(*sorted_pairs))
flags = ut.not_list(ut.flag_None_items(am_rowids))
#am_rowids = ut.compress(am_rowids, flags)
positive_tags = ['SplitCase', 'Photobomb']
flags_list = [ut.replace_nones(ibs.get_annotmatch_prop(tag, am_rowids), 0)
for tag in positive_tags]
print('edge_case_hist: ' + ut.repr3(
['%s %s' % (txt, sum(flags_)) for flags_, txt in zip(flags_list, positive_tags)]))
is_positive = ut.or_lists(*flags_list)
num_positive = sum(ut.lmap(any, ut.group_items(is_positive, sorted_nids).values()))
pop = len(pop_nids)
print('A positive is any edge flagged as a %s' % (ut.conj_phrase(positive_tags, 'or'),))
print('--- Sampling wrt edges ---')
print('edge_sample_size = %r' % (edge_sample_size,))
print('edge_population_size = %r' % (len(aid_pairs),))
print('num_positive_edges = %r' % (sum(is_positive)))
print('--- Sampling wrt names ---')
print('name_population_size = %r' % (pop,))
vt.calc_error_bars_from_sample(sample_size, num_positive, pop, conf_level=.95)
nx.set_edge_attributes(infr.graph, 'score', dict(zip(aid_pairs, scores)))
win = viz_graph2.AnnotGraphWidget(infr=infr, use_image=False,
init_mode=None)
win.populate_edge_model()
win.show()
return win
# Make review interface for only bad edges
infr_list = []
iter_ = list(zip(flagged_annots, bad_edges_list))
for annots, bad_edges in ut.ProgIter(iter_, lbl='creating inference', **progkw):
aids = annots.aids
nids = [1] * len(aids)
infr = graph_iden.AnnotInference(ibs, aids, nids, verbose=False)
infr.initialize_graph()
infr.reset_feedback()
infr.apply_feedback()
infr_list.append(infr)
# Check which ones are user defined as incorrect
#num_positive = 0
#for infr in infr_list:
# flag = np.any(infr.get_feedback_probs()[0] == 0)
# num_positive += flag
#print('num_positive = %r' % (num_positive,))
#pop = len(infr_list)
#print('pop = %r' % (pop,))
iter_ = list(zip(infr_list, bad_edges_list))
for infr, bad_edges in ut.ProgIter(iter_, lbl='adding speed edges', **progkw):
flipped_edges = []
for aid1, aid2 in bad_edges:
if infr.graph.has_edge(aid1, aid2):
flipped_edges.append((aid1, aid2))
infr.add_feedback(aid1, aid2, 'nomatch')
infr.apply_feedback()
nx.set_edge_attributes(infr.graph, '_speed_split', 'orig')
nx.set_edge_attributes(infr.graph, '_speed_split',
{edge: 'new' for edge in bad_edges})
nx.set_edge_attributes(infr.graph, '_speed_split',
{edge: 'flip' for edge in flipped_edges})
#for infr in ut.ProgIter(infr_list, lbl='flagging speeding edges', **progkw):
# annots = ibs.annots(infr.aids)
# edge_to_speeds = annots.get_speeds()
# bad_edges = [edge for edge, speed in edge_to_speeds.items() if speed > MAX_SPEED]
def inference_stats(infr_list_):
relabel_stats = []
for infr in infr_list_:
num_ccs, num_inconsistent = infr.connected_compoment_reviewed_relabel()
state_hist = ut.dict_hist(nx.get_edge_attributes(infr.graph, 'reviewed_state').values())
if 'match' not in state_hist:
state_hist['match'] = 0
hist = ut.dict_hist(nx.get_edge_attributes(infr.graph, '_speed_split').values())
subgraphs = infr.connected_compoment_reviewed_subgraphs()
subgraph_sizes = [len(g) for g in subgraphs]
info = ut.odict([
('num_nonmatch_edges', state_hist['nomatch']),
('num_match_edges', state_hist['match']),
('frac_nonmatch_edges', state_hist['nomatch'] / (state_hist['match'] + state_hist['nomatch'])),
('num_inconsistent', num_inconsistent),
('num_ccs', num_ccs),
('edges_flipped', hist.get('flip', 0)),
('edges_unchanged', hist.get('orig', 0)),
('bad_unreviewed_edges', hist.get('new', 0)),
('orig_size', len(infr.graph)),
('new_sizes', subgraph_sizes),
])
relabel_stats.append(info)
return relabel_stats
relabel_stats = inference_stats(infr_list)
print('\nAll Split Info:')
lines = []
for key in relabel_stats[0].keys():
data = ut.take_column(relabel_stats, key)
if key == 'new_sizes':
data = ut.flatten(data)
lines.append('stats(%s) = %s' % (key, ut.repr2(ut.get_stats(data, use_median=True), precision=2)))
print('\n'.join(ut.align_lines(lines, '=')))
num_incon_list = np.array(ut.take_column(relabel_stats, 'num_inconsistent'))
can_split_flags = num_incon_list == 0
print('Can trivially split %d / %d' % (sum(can_split_flags), len(can_split_flags)))
splittable_infrs = ut.compress(infr_list, can_split_flags)
relabel_stats = inference_stats(splittable_infrs)
print('\nTrival Split Info:')
lines = []
for key in relabel_stats[0].keys():
if key in ['num_inconsistent']:
continue
data = ut.take_column(relabel_stats, key)
if key == 'new_sizes':
data = ut.flatten(data)
lines.append('stats(%s) = %s' % (
key, ut.repr2(ut.get_stats(data, use_median=True), precision=2)))
print('\n'.join(ut.align_lines(lines, '=')))
num_match_edges = np.array(ut.take_column(relabel_stats, 'num_match_edges'))
num_nonmatch_edges = np.array(ut.take_column(relabel_stats, 'num_nonmatch_edges'))
flags1 = np.logical_and(num_match_edges > num_nonmatch_edges, num_nonmatch_edges < 3)
reasonable_infr = ut.compress(splittable_infrs, flags1)
new_sizes_list = ut.take_column(relabel_stats, 'new_sizes')
flags2 = [len(sizes) == 2 and sum(sizes) > 4 and (min(sizes) / max(sizes)) > .3
for sizes in new_sizes_list]
reasonable_infr = ut.compress(splittable_infrs, flags2)
print('#reasonable_infr = %r' % (len(reasonable_infr),))
for infr in ut.InteractiveIter(reasonable_infr):
annots = ibs.annots(infr.aids)
edge_to_speeds = annots.get_speeds()
print('max_speed = %r' % (max(edge_to_speeds.values())),)
infr.initialize_visual_node_attrs()
infr.apply_cuts()
infr.show_graph(use_image=True, only_reviewed=True)
rest = ~np.logical_or(flags1, flags2)
nonreasonable_infr = ut.compress(splittable_infrs, rest)
rng = np.random.RandomState(0)
random_idx = ut.random_indexes(len(nonreasonable_infr) - 1, 15, rng=rng)
random_infr = ut.take(nonreasonable_infr, random_idx)
for infr in ut.InteractiveIter(random_infr):
annots = ibs.annots(infr.aids)
edge_to_speeds = annots.get_speeds()
print('max_speed = %r' % (max(edge_to_speeds.values())),)
infr.initialize_visual_node_attrs()
infr.apply_cuts()
infr.show_graph(use_image=True, only_reviewed=True)
#import scipy.stats as st
#conf_interval = .95
#st.norm.cdf(conf_interval)
# view-source:http://www.surveysystem.com/sscalc.htm
#zval = 1.96 # 95 percent confidence
#zValC = 3.8416 #
#zValC = 6.6564
#import statsmodels.stats.api as sms
#es = sms.proportion_effectsize(0.5, 0.75)
#sms.NormalIndPower().solve_power(es, power=0.9, alpha=0.05, ratio=1)
pop = 279
num_positive = 3
sample_size = 15
conf_level = .95
#conf_level = .99
vt.calc_error_bars_from_sample(sample_size, num_positive, pop, conf_level)
print('---')
vt.calc_error_bars_from_sample(sample_size + 38, num_positive, pop, conf_level)
print('---')
vt.calc_error_bars_from_sample(sample_size + 38 / 3, num_positive, pop, conf_level)
print('---')
vt.calc_error_bars_from_sample(15 + 38, num_positive=3, pop=675, conf_level=.95)
vt.calc_error_bars_from_sample(15, num_positive=3, pop=675, conf_level=.95)
pop = 279
#err_frac = .05 # 5%
err_frac = .10 # 10%
conf_level = .95
vt.calc_sample_from_error_bars(err_frac, pop, conf_level)
pop = 675
vt.calc_sample_from_error_bars(err_frac, pop, conf_level)
vt.calc_sample_from_error_bars(.05, pop, conf_level=.95, prior=.1)
vt.calc_sample_from_error_bars(.05, pop, conf_level=.68, prior=.2)
vt.calc_sample_from_error_bars(.10, pop, conf_level=.68)
vt.calc_error_bars_from_sample(100, num_positive=5, pop=675, conf_level=.95)
vt.calc_error_bars_from_sample(100, num_positive=5, pop=675, conf_level=.68)
#flagged_nids = [a.nids[0] for a in flagged_annots]
#all_nids = ibs.get_valid_nids()
#remain_nids = ut.setdiff(all_nids, flagged_nids)
#nAids_list = np.array(ut.lmap(len, ibs.get_name_aids(all_nids)))
#nAids_list = np.array(ut.lmap(len, ibs.get_name_aids(remain_nids)))
##graph = infr.graph
#g2 = infr.graph.copy()
#[ut.nx_delete_edge_attr(g2, a) for a in infr.visual_edge_attrs]
#g2.edge
def estimate_ggr_count(ibs):
"""
Example:
>>> # DISABLE_DOCTEST GGR
>>> from ibeis.other.dbinfo import * # NOQA
>>> import ibeis
>>> dbdir = ut.truepath('~/lev/media/danger/GGR/GGR-IBEIS')
>>> ibs = ibeis.opendb(dbdir='/home/joncrall/lev/media/danger/GGR/GGR-IBEIS')
"""
import datetime
day1 = datetime.date(2016, 1, 30)
day2 = datetime.date(2016, 1, 31)
filter_kw = {
'multiple': None,
'minqual': 'good',
'is_known': True,
'min_pername': 1,
'view': ['right'],
}
print('\nOnly Single-Animal-In-Annotation:')
filter_kw['multiple'] = False
estimate_twoday_count(ibs, day1, day2, filter_kw)
print('\nOnly Multi-Animal-In-Annotation:')
filter_kw['multiple'] = True
estimate_twoday_count(ibs, day1, day2, filter_kw)
print('\nUsing Both:')
filter_kw['multiple'] = None
return estimate_twoday_count(ibs, day1, day2, filter_kw)
def estimate_twoday_count(ibs, day1, day2, filter_kw):
#gid_list = ibs.get_valid_gids()
all_images = ibs.images()
dates = [dt.date() for dt in all_images.datetime]
date_to_images = all_images.group_items(dates)
date_to_images = ut.sort_dict(date_to_images)
#date_hist = ut.map_dict_vals(len, date2_gids)
#print('date_hist = %s' % (ut.repr2(date_hist, nl=2),))
verbose = 0
visit_dates = [day1, day2]
visit_info_list_ = []
for day in visit_dates:
images = date_to_images[day]
aids = ut.flatten(images.aids)
aids = ibs.filter_annots_general(aids, filter_kw=filter_kw,
verbose=verbose)
nids = ibs.get_annot_name_rowids(aids)
grouped_aids = ut.group_items(aids, nids)
unique_nids = ut.unique(list(grouped_aids.keys()))
if False:
aids_list = ut.take(grouped_aids, unique_nids)
for aids in aids_list:
if len(aids) > 30:
break
timedeltas_list = ibs.get_unflat_annots_timedelta_list(aids_list)
# Do the five second rule
marked_thresh = 5
flags = []
for nid, timedeltas in zip(unique_nids, timedeltas_list):
flags.append(timedeltas.max() > marked_thresh)
print('Unmarking %d names' % (len(flags) - sum(flags)))
unique_nids = ut.compress(unique_nids, flags)
grouped_aids = ut.dict_subset(grouped_aids, unique_nids)
unique_aids = ut.flatten(list(grouped_aids.values()))
info = {
'unique_nids': unique_nids,
'grouped_aids': grouped_aids,
'unique_aids': unique_aids,
}
visit_info_list_.append(info)
# Estimate statistics
from ibeis.other import dbinfo
aids_day1, aids_day2 = ut.take_column(visit_info_list_, 'unique_aids')
nids_day1, nids_day2 = ut.take_column(visit_info_list_, 'unique_nids')
resight_nids = ut.isect(nids_day1, nids_day2)
nsight1 = len(nids_day1)
nsight2 = len(nids_day2)
resight = len(resight_nids)
lp_index, lp_error = dbinfo.sight_resight_count(nsight1, nsight2, resight)
if False:
from ibeis.other import dbinfo
print('DAY 1 STATS:')
_ = dbinfo.get_dbinfo(ibs, aid_list=aids_day1) # NOQA
print('DAY 2 STATS:')
_ = dbinfo.get_dbinfo(ibs, aid_list=aids_day2) # NOQA
print('COMBINED STATS:')
_ = dbinfo.get_dbinfo(ibs, aid_list=aids_day1 + aids_day2) # NOQA
print('%d annots on day 1' % (len(aids_day1)) )
print('%d annots on day 2' % (len(aids_day2)) )
print('%d names on day 1' % (nsight1,))
print('%d names on day 2' % (nsight2,))
print('resight = %r' % (resight,))
print('lp_index = %r ± %r' % (lp_index, lp_error))
return nsight1, nsight2, resight, lp_index, lp_error
def draw_twoday_count(ibs, visit_info_list_):
import copy
visit_info_list = copy.deepcopy(visit_info_list_)
aids_day1, aids_day2 = ut.take_column(visit_info_list_, 'aids')
nids_day1, nids_day2 = ut.take_column(visit_info_list_, 'unique_nids')
resight_nids = ut.isect(nids_day1, nids_day2)
if False:
# HACK REMOVE DATA TO MAKE THIS FASTER
num = 20
for info in visit_info_list:
non_resight_nids = list(set(info['unique_nids']) - set(resight_nids))
sample_nids2 = non_resight_nids[0:num] + resight_nids[:num]
info['grouped_aids'] = ut.dict_subset(info['grouped_aids'], sample_nids2)
info['unique_nids'] = sample_nids2
# Build a graph of matches
if False:
debug = False
for info in visit_info_list:
edges = []
grouped_aids = info['grouped_aids']
aids_list = list(grouped_aids.values())
ams_list = ibs.get_annotmatch_rowids_in_cliques(aids_list)
aids1_list = ibs.unflat_map(ibs.get_annotmatch_aid1, ams_list)
aids2_list = ibs.unflat_map(ibs.get_annotmatch_aid2, ams_list)
for ams, aids, aids1, aids2 in zip(ams_list, aids_list, aids1_list, aids2_list):
edge_nodes = set(aids1 + aids2)
##if len(edge_nodes) != len(set(aids)):
# #print('--')
# #print('aids = %r' % (aids,))
# #print('edge_nodes = %r' % (edge_nodes,))
bad_aids = edge_nodes - set(aids)
if len(bad_aids) > 0:
print('bad_aids = %r' % (bad_aids,))
unlinked_aids = set(aids) - edge_nodes
mst_links = list(ut.itertwo(list(unlinked_aids) + list(edge_nodes)[:1]))
bad_aids.add(None)
user_links = [(u, v) for (u, v) in zip(aids1, aids2) if u not in bad_aids and v not in bad_aids]
new_edges = mst_links + user_links
new_edges = [(int(u), int(v)) for u, v in new_edges if u not in bad_aids and v not in bad_aids]
edges += new_edges
info['edges'] = edges
# Add edges between days
grouped_aids1, grouped_aids2 = ut.take_column(visit_info_list, 'grouped_aids')
nids_day1, nids_day2 = ut.take_column(visit_info_list, 'unique_nids')
resight_nids = ut.isect(nids_day1, nids_day2)
resight_aids1 = ut.take(grouped_aids1, resight_nids)
resight_aids2 = ut.take(grouped_aids2, resight_nids)
#resight_aids3 = [list(aids1) + list(aids2) for aids1, aids2 in zip(resight_aids1, resight_aids2)]
ams_list = ibs.get_annotmatch_rowids_between_groups(resight_aids1, resight_aids2)
aids1_list = ibs.unflat_map(ibs.get_annotmatch_aid1, ams_list)
aids2_list = ibs.unflat_map(ibs.get_annotmatch_aid2, ams_list)
between_edges = []
for ams, aids1, aids2, rawaids1, rawaids2 in zip(ams_list, aids1_list, aids2_list, resight_aids1, resight_aids2):
link_aids = aids1 + aids2
rawaids3 = rawaids1 + rawaids2
badaids = ut.setdiff(link_aids, rawaids3)
assert not badaids
user_links = [(int(u), int(v)) for (u, v) in zip(aids1, aids2)
if u is not None and v is not None]
# HACK THIS OFF
user_links = []
if len(user_links) == 0:
# Hack in an edge
between_edges += [(rawaids1[0], rawaids2[0])]
else:
between_edges += user_links
assert np.all(0 == np.diff(np.array(ibs.unflat_map(ibs.get_annot_nids, between_edges)), axis=1))
import plottool as pt
import networkx as nx
#pt.qt4ensure()
#len(list(nx.connected_components(graph1)))
#print(ut.graph_info(graph1))
# Layout graph
layoutkw = dict(
prog='neato',
draw_implicit=False, splines='line',
#splines='curved',
#splines='spline',
#sep=10 / 72,
#prog='dot', rankdir='TB',
)
def translate_graph_to_origin(graph):
x, y, w, h = ut.get_graph_bounding_box(graph)
ut.translate_graph(graph, (-x, -y))
def stack_graphs(graph_list, vert=False, pad=None):
graph_list_ = [g.copy() for g in graph_list]
for g in graph_list_:
translate_graph_to_origin(g)
bbox_list = [ut.get_graph_bounding_box(g) for g in graph_list_]
if vert:
dim1 = 3
dim2 = 2
else:
dim1 = 2
dim2 = 3
dim1_list = np.array([bbox[dim1] for bbox in bbox_list])
dim2_list = np.array([bbox[dim2] for bbox in bbox_list])
if pad is None:
pad = np.mean(dim1_list) / 2
offset1_list = ut.cumsum([0] + [d + pad for d in dim1_list[:-1]])
max_dim2 = max(dim2_list)
offset2_list = [(max_dim2 - d2) / 2 for d2 in dim2_list]
if vert:
t_xy_list = [(d2, d1) for d1, d2 in zip(offset1_list, offset2_list)]
else:
t_xy_list = [(d1, d2) for d1, d2 in zip(offset1_list, offset2_list)]
for g, t_xy in zip(graph_list_, t_xy_list):
ut.translate_graph(g, t_xy)
nx.set_node_attributes(g, 'pin', 'true')
new_graph = nx.compose_all(graph_list_)
#pt.show_nx(new_graph, layout='custom', node_labels=False, as_directed=False) # NOQA
return new_graph
# Construct graph
for count, info in enumerate(visit_info_list):
graph = nx.Graph()
edges = [(int(u), int(v)) for u, v in info['edges']
if u is not None and v is not None]
graph.add_edges_from(edges, attr_dict={'zorder': 10})
nx.set_node_attributes(graph, 'zorder', 20)
# Layout in neato
_ = pt.nx_agraph_layout(graph, inplace=True, **layoutkw) # NOQA
# Extract compoments and then flatten in nid ordering
ccs = list(nx.connected_components(graph))
root_aids = []
cc_graphs = []
for cc_nodes in ccs:
cc = graph.subgraph(cc_nodes)
try:
root_aids.append(list(ut.nx_source_nodes(cc.to_directed()))[0])
except nx.NetworkXUnfeasible:
root_aids.append(list(cc.nodes())[0])
cc_graphs.append(cc)
root_nids = ibs.get_annot_nids(root_aids)
nid2_graph = dict(zip(root_nids, cc_graphs))
resight_nids_ = set(resight_nids).intersection(set(root_nids))
noresight_nids_ = set(root_nids) - resight_nids_
n_graph_list = ut.take(nid2_graph, sorted(noresight_nids_))
r_graph_list = ut.take(nid2_graph, sorted(resight_nids_))
if len(n_graph_list) > 0:
n_graph = nx.compose_all(n_graph_list)
_ = pt.nx_agraph_layout(n_graph, inplace=True, **layoutkw) # NOQA
n_graphs = [n_graph]
else:
n_graphs = []
r_graphs = [stack_graphs(chunk) for chunk in ut.ichunks(r_graph_list, 100)]
if count == 0:
new_graph = stack_graphs(n_graphs + r_graphs, vert=True)
else:
new_graph = stack_graphs(r_graphs[::-1] + n_graphs, vert=True)
#pt.show_nx(new_graph, layout='custom', node_labels=False, as_directed=False) # NOQA
info['graph'] = new_graph
graph1_, graph2_ = ut.take_column(visit_info_list, 'graph')
if False:
_ = pt.show_nx(graph1_, layout='custom', node_labels=False, as_directed=False) # NOQA
_ = pt.show_nx(graph2_, layout='custom', node_labels=False, as_directed=False) # NOQA
graph_list = [graph1_, graph2_]
twoday_graph = stack_graphs(graph_list, vert=True, pad=None)
nx.set_node_attributes(twoday_graph, 'pin', 'true')
if debug:
ut.nx_delete_None_edge_attr(twoday_graph)
ut.nx_delete_None_node_attr(twoday_graph)
print('twoday_graph(pre) info' + ut.repr3(ut.graph_info(twoday_graph), nl=2))
    # Hack, no idea why there are nodes that don't exist here
between_edges_ = [edge for edge in between_edges
if twoday_graph.has_node(edge[0]) and twoday_graph.has_node(edge[1])]
twoday_graph.add_edges_from(between_edges_, attr_dict={'alpha': .2, 'zorder': 0})
ut.nx_ensure_agraph_color(twoday_graph)
layoutkw['splines'] = 'line'
layoutkw['prog'] = 'neato'
agraph = pt.nx_agraph_layout(twoday_graph, inplace=True, return_agraph=True, **layoutkw)[-1] # NOQA
if False:
fpath = ut.truepath('~/ggr_graph.png')
agraph.draw(fpath)
ut.startfile(fpath)
if debug:
print('twoday_graph(post) info' + ut.repr3(ut.graph_info(twoday_graph)))
_ = pt.show_nx(twoday_graph, layout='custom', node_labels=False, as_directed=False) # NOQA
def get_dbinfo(ibs, verbose=True,
with_imgsize=False,
with_bytes=False,
with_contrib=False,
with_agesex=False,
with_header=True,
short=False,
tag='dbinfo',
aid_list=None):
"""
    Returns a dictionary of digestible database information
Infostr is a string summary of all the stats. Prints infostr in addition to
returning locals
Args:
ibs (IBEISController):
verbose (bool):
with_imgsize (bool):
with_bytes (bool):
Returns:
dict:
CommandLine:
python -m ibeis.other.dbinfo --exec-get_dbinfo:0
python -m ibeis.other.dbinfo --test-get_dbinfo:1
python -m ibeis.other.dbinfo --test-get_dbinfo:0 --db NNP_Master3
python -m ibeis.other.dbinfo --test-get_dbinfo:0 --db PZ_Master1
python -m ibeis.other.dbinfo --test-get_dbinfo:0 --db GZ_ALL
python -m ibeis.other.dbinfo --exec-get_dbinfo:0 --db PZ_ViewPoints
python -m ibeis.other.dbinfo --exec-get_dbinfo:0 --db GZ_Master1
python -m ibeis.other.dbinfo --exec-get_dbinfo:0 -a ctrl
python -m ibeis.other.dbinfo --exec-get_dbinfo:0 -a default:minqual=ok,require_timestamp=True --dbdir ~/lev/media/danger/LEWA
python -m ibeis.other.dbinfo --exec-get_dbinfo:0 -a default:minqual=ok,require_timestamp=True --dbdir ~/lev/media/danger/LEWA --loadbackup=0
python -m ibeis.other.dbinfo --exec-get_dbinfo:0 -a default: --dbdir ~/lev/media/danger/LEWA
python -m ibeis.other.dbinfo --exec-get_dbinfo:0 -a default: --dbdir ~/lev/media/danger/LEWA --loadbackup=0
Example1:
>>> # SCRIPT
>>> from ibeis.other.dbinfo import * # NOQA
>>> import ibeis
>>> defaultdb = 'testdb1'
>>> ibs, aid_list = ibeis.testdata_aids(defaultdb, a='default:minqual=ok,view=primary,view_ext1=1')
>>> kwargs = ut.get_kwdefaults(get_dbinfo)
>>> kwargs['verbose'] = False
>>> kwargs['aid_list'] = aid_list
>>> kwargs = ut.parse_dict_from_argv(kwargs)
>>> output = get_dbinfo(ibs, **kwargs)
>>> result = (output['info_str'])
>>> print(result)
>>> #ibs = ibeis.opendb(defaultdb='testdb1')
>>> # <HACK FOR FILTERING>
>>> #from ibeis.expt import cfghelpers
>>> #from ibeis.expt import annotation_configs
>>> #from ibeis.init import filter_annots
>>> #named_defaults_dict = ut.dict_take(annotation_configs.__dict__,
>>> # annotation_configs.TEST_NAMES)
>>> #named_qcfg_defaults = dict(zip(annotation_configs.TEST_NAMES,
>>> # ut.get_list_column(named_defaults_dict, 'qcfg')))
>>> #acfg = cfghelpers.parse_argv_cfg(('--annot-filter', '-a'), named_defaults_dict=named_qcfg_defaults, default=None)[0]
>>> #aid_list = ibs.get_valid_aids()
>>> # </HACK FOR FILTERING>
Example1:
>>> # ENABLE_DOCTEST
>>> from ibeis.other.dbinfo import * # NOQA
>>> import ibeis
>>> verbose = True
>>> short = True
>>> #ibs = ibeis.opendb(db='GZ_ALL')
>>> #ibs = ibeis.opendb(db='PZ_Master0')
>>> ibs = ibeis.opendb('testdb1')
>>> assert ibs.get_dbname() == 'testdb1', 'DO NOT DELETE CONTRIBUTORS OF OTHER DBS'
>>> ibs.delete_contributors(ibs.get_valid_contrib_rowids())
>>> ibs.delete_empty_nids()
>>> #ibs = ibeis.opendb(db='PZ_MTEST')
>>> output = get_dbinfo(ibs, with_contrib=False, verbose=False, short=True)
>>> result = (output['info_str'])
>>> print(result)
+============================
DB Info: testdb1
DB Notes: None
DB NumContrib: 0
----------
# Names = 7
# Names (unassociated) = 0
# Names (singleton) = 5
# Names (multiton) = 2
----------
# Annots = 13
# Annots (unknown) = 4
# Annots (singleton) = 5
# Annots (multiton) = 4
----------
# Img = 13
L============================
"""
# TODO Database size in bytes
# TODO: occurrence, contributors, etc...
# Basic variables
request_annot_subset = False
_input_aid_list = aid_list # NOQA
if aid_list is None:
valid_aids = ibs.get_valid_aids()
valid_nids = ibs.get_valid_nids()
valid_gids = ibs.get_valid_gids()
else:
if isinstance(aid_list, str):
# Hack to get experiment stats on aids
acfg_name_list = [aid_list]
print('Specified custom aids via acfgname %s' % (acfg_name_list,))
from ibeis.expt import experiment_helpers
acfg_list, expanded_aids_list = experiment_helpers.get_annotcfg_list(
ibs, acfg_name_list)
aid_list = sorted(list(set(ut.flatten(ut.flatten(expanded_aids_list)))))
#aid_list =
if verbose:
print('Specified %d custom aids' % (len(aid_list,)))
request_annot_subset = True
valid_aids = aid_list
valid_nids = list(
set(ibs.get_annot_nids(aid_list, distinguish_unknowns=False)) -
{const.UNKNOWN_NAME_ROWID}
)
valid_gids = list(set(ibs.get_annot_gids(aid_list)))
#associated_nids = ibs.get_valid_nids(filter_empty=True) # nids with at least one annotation
# Image info
if verbose:
print('Checking Image Info')
gx2_aids = ibs.get_image_aids(valid_gids)
if request_annot_subset:
# remove annots not in this subset
valid_aids_set = set(valid_aids)
gx2_aids = [list(set(aids).intersection(valid_aids_set)) for aids in gx2_aids]
gx2_nAnnots = np.array(list(map(len, gx2_aids)))
image_without_annots = len(np.where(gx2_nAnnots == 0)[0])
gx2_nAnnots_stats = ut.get_stats_str(gx2_nAnnots, newlines=True, use_median=True)
image_reviewed_list = ibs.get_image_reviewed(valid_gids)
# Name stats
if verbose:
print('Checking Name Info')
nx2_aids = ibs.get_name_aids(valid_nids)
if request_annot_subset:
# remove annots not in this subset
valid_aids_set = set(valid_aids)
nx2_aids = [list(set(aids).intersection(valid_aids_set)) for aids in nx2_aids]
associated_nids = ut.compress(valid_nids, list(map(len, nx2_aids)))
ibs.check_name_mapping_consistency(nx2_aids)
if False:
# Occurrence Info
def compute_annot_occurrence_ids(ibs, aid_list):
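            # Cluster annotations into occurrences via their images (4 hour time
            # threshold) and map each occurrence id to its member annotations.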
from ibeis.algo.preproc import preproc_occurrence
gid_list = ibs.get_annot_gids(aid_list)
gid2_aids = ut.group_items(aid_list, gid_list)
config = {'seconds_thresh': 4 * 60 * 60}
flat_imgsetids, flat_gids = preproc_occurrence.ibeis_compute_occurrences(
ibs, gid_list, config=config, verbose=False)
occurid2_gids = ut.group_items(flat_gids, flat_imgsetids)
occurid2_aids = {oid: ut.flatten(ut.take(gid2_aids, gids)) for oid, gids in occurid2_gids.items()}
return occurid2_aids
import utool
with utool.embed_on_exception_context:
occurid2_aids = compute_annot_occurrence_ids(ibs, valid_aids)
occur_nids = ibs.unflat_map(ibs.get_annot_nids, occurid2_aids.values())
occur_unique_nids = [ut.unique(nids) for nids in occur_nids]
nid2_occurxs = ut.ddict(list)
for occurx, nids in enumerate(occur_unique_nids):
for nid in nids:
nid2_occurxs[nid].append(occurx)
nid2_occurx_single = {nid: occurxs for nid, occurxs in nid2_occurxs.items() if len(occurxs) <= 1}
nid2_occurx_resight = {nid: occurxs for nid, occurxs in nid2_occurxs.items() if len(occurxs) > 1}
singlesight_encounters = ibs.get_name_aids(nid2_occurx_single.keys())
singlesight_annot_stats = ut.get_stats(list(map(len, singlesight_encounters)), use_median=True, use_sum=True)
resight_name_stats = ut.get_stats(list(map(len, nid2_occurx_resight.values())), use_median=True, use_sum=True)
# Encounter Info
def break_annots_into_encounters(aids):
from ibeis.algo.preproc import occurrence_blackbox
import datetime
thresh_sec = datetime.timedelta(minutes=30).seconds
posixtimes = np.array(ibs.get_annot_image_unixtimes_asfloat(aids))
#latlons = ibs.get_annot_image_gps(aids)
labels = occurrence_blackbox.cluster_timespace2(posixtimes, None, thresh_sec=thresh_sec)
return labels
#ave_enc_time = [np.mean(times) for lbl, times in ut.group_items(posixtimes, labels).items()]
#ut.square_pdist(ave_enc_time)
try:
am_rowids = ibs.get_annotmatch_rowids_between_groups([valid_aids], [valid_aids])[0]
aid_pairs = ibs.filter_aidpairs_by_tags(min_num=0, am_rowids=am_rowids)
undirected_tags = ibs.get_aidpair_tags(aid_pairs.T[0], aid_pairs.T[1], directed=False)
tagged_pairs = list(zip(aid_pairs.tolist(), undirected_tags))
tag_dict = ut.groupby_tags(tagged_pairs, undirected_tags)
pair_tag_info = ut.map_dict_vals(len, tag_dict)
reviewed_type_hist = ut.dict_hist(ibs.get_annot_pair_is_reviewed(aid_pairs.T[0], aid_pairs.T[1]))
pair_tag_info['reviewed_type_hist'] = reviewed_type_hist
except Exception:
pair_tag_info = {}
#print(ut.dict_str(pair_tag_info))
# Annot Stats
# TODO: number of images where chips cover entire image
# TODO: total image coverage of annotation
# TODO: total annotation overlap
"""
ax2_unknown = ibs.is_aid_unknown(valid_aids)
ax2_nid = ibs.get_annot_name_rowids(valid_aids)
assert all([nid < 0 if unknown else nid > 0 for nid, unknown in
zip(ax2_nid, ax2_unknown)]), 'bad annot nid'
"""
#
if verbose:
print('Checking Annot Species')
unknown_aids = ut.compress(valid_aids, ibs.is_aid_unknown(valid_aids))
species_list = ibs.get_annot_species_texts(valid_aids)
species2_aids = ut.group_items(valid_aids, species_list)
species2_nAids = {key: len(val) for key, val in species2_aids.items()}
if verbose:
print('Checking Multiton/Singleton Species')
nx2_nAnnots = np.array(list(map(len, nx2_aids)))
    # Separate singleton / multitons
multiton_nxs = np.where(nx2_nAnnots > 1)[0]
singleton_nxs = np.where(nx2_nAnnots == 1)[0]
unassociated_nxs = np.where(nx2_nAnnots == 0)[0]
assert len(np.intersect1d(singleton_nxs, multiton_nxs)) == 0, 'intersecting names'
valid_nxs = np.hstack([multiton_nxs, singleton_nxs])
num_names_with_gt = len(multiton_nxs)
# Annot Info
if verbose:
print('Checking Annot Info')
multiton_aids_list = ut.take(nx2_aids, multiton_nxs)
assert len(set(multiton_nxs)) == len(multiton_nxs)
if len(multiton_aids_list) == 0:
        multiton_aids = np.array([], dtype=int)
else:
multiton_aids = np.hstack(multiton_aids_list)
assert len(set(multiton_aids)) == len(multiton_aids), 'duplicate annot'
singleton_aids = ut.take(nx2_aids, singleton_nxs)
multiton_nid2_nannots = list(map(len, multiton_aids_list))
# Image size stats
if with_imgsize:
if verbose:
print('Checking ImageSize Info')
gpath_list = ibs.get_image_paths(valid_gids)
def wh_print_stats(wh_list):
if len(wh_list) == 0:
return '{empty}'
wh_list = np.asarray(wh_list)
stat_dict = OrderedDict(
[( 'max', wh_list.max(0)),
( 'min', wh_list.min(0)),
('mean', wh_list.mean(0)),
( 'std', wh_list.std(0))])
def arr2str(var):
return ('[' + (
', '.join(list(map(lambda x: '%.1f' % x, var)))
) + ']')
ret = (',\n '.join([
'%s:%s' % (key, arr2str(val))
for key, val in stat_dict.items()
]))
return '{\n ' + ret + '\n}'
print('reading image sizes')
# Image size stats
img_size_list = ibs.get_image_sizes(valid_gids)
img_size_stats = wh_print_stats(img_size_list)
# Chip size stats
annotation_bbox_list = ibs.get_annot_bboxes(valid_aids)
annotation_bbox_arr = np.array(annotation_bbox_list)
if len(annotation_bbox_arr) == 0:
annotation_size_list = []
else:
annotation_size_list = annotation_bbox_arr[:, 2:4]
chip_size_stats = wh_print_stats(annotation_size_list)
imgsize_stat_lines = [
(' # Img in dir = %d' % len(gpath_list)),
(' Image Size Stats = %s' % (img_size_stats,)),
(' * Chip Size Stats = %s' % (chip_size_stats,)),
]
else:
imgsize_stat_lines = []
if verbose:
print('Building Stats String')
multiton_stats = ut.get_stats_str(multiton_nid2_nannots, newlines=True, use_median=True)
# Time stats
unixtime_list = ibs.get_image_unixtime(valid_gids)
unixtime_list = ut.list_replace(unixtime_list, -1, float('nan'))
#valid_unixtime_list = [time for time in unixtime_list if time != -1]
#unixtime_statstr = ibs.get_image_time_statstr(valid_gids)
if ut.get_argflag('--hackshow-unixtime'):
show_time_distributions(ibs, unixtime_list)
ut.show_if_requested()
unixtime_statstr = ut.get_timestats_str(unixtime_list, newlines=True, full=True)
# GPS stats
gps_list_ = ibs.get_image_gps(valid_gids)
gpsvalid_list = [gps != (-1, -1) for gps in gps_list_]
gps_list = ut.compress(gps_list_, gpsvalid_list)
def get_annot_age_stats(aid_list):
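        # Histogram annotations into Infant (< 12 months), Juvenile (12-36 months)
        # and Adult (>= 36 months) classes using the estimated min/max age in months.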
annot_age_months_est_min = ibs.get_annot_age_months_est_min(aid_list)
annot_age_months_est_max = ibs.get_annot_age_months_est_max(aid_list)
age_dict = ut.ddict((lambda : 0))
for min_age, max_age in zip(annot_age_months_est_min, annot_age_months_est_max):
            if (min_age is None or min_age < 12) and (max_age is None or max_age < 12):
age_dict['Infant'] += 1
elif 12 <= min_age and min_age < 36 and 12 <= max_age and max_age < 36:
age_dict['Juvenile'] += 1
            elif 36 <= min_age and (max_age is None or 36 <= max_age):
age_dict['Adult'] += 1
else:
print('Found UNKNOWN Age: %r, %r' % (min_age, max_age, ))
age_dict['UNKNOWN'] += 1
return age_dict
def get_annot_sex_stats(aid_list):
annot_sextext_list = ibs.get_annot_sex_texts(aid_list)
sextext2_aids = ut.group_items(aid_list, annot_sextext_list)
sex_keys = list(ibs.const.SEX_TEXT_TO_INT.keys())
assert set(sex_keys) >= set(annot_sextext_list), 'bad keys: ' + str(set(annot_sextext_list) - set(sex_keys))
sextext2_nAnnots = ut.odict([(key, len(sextext2_aids.get(key, []))) for key in sex_keys])
# Filter 0's
sextext2_nAnnots = {key: val for key, val in six.iteritems(sextext2_nAnnots) if val != 0}
return sextext2_nAnnots
if verbose:
print('Checking Other Annot Stats')
qualtext2_nAnnots = ibs.get_annot_qual_stats(valid_aids)
yawtext2_nAnnots = ibs.get_annot_yaw_stats(valid_aids)
agetext2_nAnnots = get_annot_age_stats(valid_aids)
sextext2_nAnnots = get_annot_sex_stats(valid_aids)
if verbose:
print('Checking Contrib Stats')
# Contributor Statistics
# hack remove colon for image alignment
def fix_tag_list(tag_list):
return [None if tag is None else tag.replace(':', ';') for tag in tag_list]
image_contrib_tags = fix_tag_list(ibs.get_image_contributor_tag(valid_gids))
annot_contrib_tags = fix_tag_list(ibs.get_annot_image_contributor_tag(valid_aids))
contrib_tag_to_gids = ut.group_items(valid_gids, image_contrib_tags)
contrib_tag_to_aids = ut.group_items(valid_aids, annot_contrib_tags)
contrib_tag_to_qualstats = {key: ibs.get_annot_qual_stats(aids) for key, aids in six.iteritems(contrib_tag_to_aids)}
contrib_tag_to_viewstats = {key: ibs.get_annot_yaw_stats(aids) for key, aids in six.iteritems(contrib_tag_to_aids)}
contrib_tag_to_nImages = {key: len(val) for key, val in six.iteritems(contrib_tag_to_gids)}
contrib_tag_to_nAnnots = {key: len(val) for key, val in six.iteritems(contrib_tag_to_aids)}
if verbose:
print('Summarizing')
# Summarize stats
num_names = len(valid_nids)
num_names_unassociated = len(valid_nids) - len(associated_nids)
num_names_singleton = len(singleton_nxs)
num_names_multiton = len(multiton_nxs)
num_singleton_annots = len(singleton_aids)
num_multiton_annots = len(multiton_aids)
num_unknown_annots = len(unknown_aids)
num_annots = len(valid_aids)
if with_bytes:
if verbose:
print('Checking Disk Space')
ibsdir_space = ut.byte_str2(ut.get_disk_space(ibs.get_ibsdir()))
dbdir_space = ut.byte_str2(ut.get_disk_space(ibs.get_dbdir()))
imgdir_space = ut.byte_str2(ut.get_disk_space(ibs.get_imgdir()))
cachedir_space = ut.byte_str2(ut.get_disk_space(ibs.get_cachedir()))
if True:
if verbose:
print('Check asserts')
try:
bad_aids = np.intersect1d(multiton_aids, unknown_aids)
_num_names_total_check = num_names_singleton + num_names_unassociated + num_names_multiton
_num_annots_total_check = num_unknown_annots + num_singleton_annots + num_multiton_annots
assert len(bad_aids) == 0, 'intersecting multiton aids and unknown aids'
assert _num_names_total_check == num_names, 'inconsistent num names'
#if not request_annot_subset:
# dont check this if you have an annot subset
assert _num_annots_total_check == num_annots, 'inconsistent num annots'
except Exception as ex:
ut.printex(ex, keys=[
'_num_names_total_check',
'num_names',
'_num_annots_total_check',
'num_annots',
'num_names_singleton',
'num_names_multiton',
'num_unknown_annots',
'num_multiton_annots',
'num_singleton_annots',
])
raise
# Get contributor statistics
contrib_rowids = ibs.get_valid_contrib_rowids()
num_contributors = len(contrib_rowids)
# print
num_tabs = 5
def align2(str_):
return ut.align(str_, ':', ' :')
def align_dict2(dict_):
str_ = ut.dict_str(dict_)
return align2(str_)
header_block_lines = (
[('+============================'), ] + (
[
('+ singleton := single sighting'),
('+ multiton := multiple sightings'),
('--' * num_tabs),
] if not short and with_header else []
)
)
source_block_lines = [
('DB Info: ' + ibs.get_dbname()),
('DB Notes: ' + ibs.get_dbnotes()),
('DB NumContrib: %d' % num_contributors),
]
bytes_block_lines = [
('--' * num_tabs),
('DB Bytes: '),
(' +- dbdir nBytes: ' + dbdir_space),
(' | +- _ibsdb nBytes: ' + ibsdir_space),
(' | | +-imgdir nBytes: ' + imgdir_space),
(' | | +-cachedir nBytes: ' + cachedir_space),
] if with_bytes else []
name_block_lines = [
('--' * num_tabs),
('# Names = %d' % num_names),
('# Names (unassociated) = %d' % num_names_unassociated),
('# Names (singleton) = %d' % num_names_singleton),
('# Names (multiton) = %d' % num_names_multiton),
]
subset_str = ' ' if not request_annot_subset else '(SUBSET)'
annot_block_lines = [
('--' * num_tabs),
('# Annots %s = %d' % (subset_str, num_annots,)),
('# Annots (unknown) = %d' % num_unknown_annots),
('# Annots (singleton) = %d' % num_singleton_annots),
('# Annots (multiton) = %d' % num_multiton_annots),
]
annot_per_basic_block_lines = [
('--' * num_tabs),
('# Annots per Name (multiton) = %s' % (align2(multiton_stats),)),
('# Annots per Image = %s' % (align2(gx2_nAnnots_stats),)),
('# Annots per Species = %s' % (align_dict2(species2_nAids),)),
] if not short else []
occurrence_block_lines = [
('--' * num_tabs),
#('# Occurrence Per Name (Resights) = %s' % (align_dict2(resight_name_stats),)),
#('# Annots per Encounter (Singlesights) = %s' % (align_dict2(singlesight_annot_stats),)),
('# Pair Tag Info (annots) = %s' % (align_dict2(pair_tag_info),)),
] if not short else []
annot_per_qualview_block_lines = [
None if short else '# Annots per Viewpoint = %s' % align_dict2(yawtext2_nAnnots),
None if short else '# Annots per Quality = %s' % align_dict2(qualtext2_nAnnots),
]
annot_per_agesex_block_lines = [
'# Annots per Age = %s' % align_dict2(agetext2_nAnnots),
'# Annots per Sex = %s' % align_dict2(sextext2_nAnnots),
] if not short and with_agesex else []
contrib_block_lines = [
'# Images per contributor = ' + align_dict2(contrib_tag_to_nImages),
'# Annots per contributor = ' + align_dict2(contrib_tag_to_nAnnots),
'# Quality per contributor = ' + ut.dict_str(contrib_tag_to_qualstats, sorted_=True),
'# Viewpoint per contributor = ' + ut.dict_str(contrib_tag_to_viewstats, sorted_=True),
] if with_contrib else []
img_block_lines = [
('--' * num_tabs),
('# Img = %d' % len(valid_gids)),
None if short else ('# Img reviewed = %d' % sum(image_reviewed_list)),
None if short else ('# Img with gps = %d' % len(gps_list)),
#('# Img with timestamp = %d' % len(valid_unixtime_list)),
None if short else ('Img Time Stats = %s' % (align2(unixtime_statstr),)),
]
info_str_lines = (
header_block_lines +
bytes_block_lines +
source_block_lines +
name_block_lines +
annot_block_lines +
annot_per_basic_block_lines +
occurrence_block_lines +
annot_per_qualview_block_lines +
annot_per_agesex_block_lines +
img_block_lines +
contrib_block_lines +
imgsize_stat_lines +
[('L============================'), ]
)
info_str = '\n'.join(ut.filter_Nones(info_str_lines))
info_str2 = ut.indent(info_str, '[{tag}]'.format(tag=tag))
if verbose:
print(info_str2)
locals_ = locals()
return locals_
def hackshow_names(ibs, aid_list, fnum=None):
r"""
Args:
ibs (IBEISController): ibeis controller object
aid_list (list):
CommandLine:
python -m ibeis.other.dbinfo --exec-hackshow_names --show
python -m ibeis.other.dbinfo --exec-hackshow_names --show --db PZ_Master1
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.other.dbinfo import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
>>> aid_list = ibs.get_valid_aids()
>>> result = hackshow_names(ibs, aid_list)
>>> print(result)
>>> ut.show_if_requested()
"""
import plottool as pt
import vtool as vt
grouped_aids, nid_list = ibs.group_annots_by_name(aid_list)
grouped_aids = [aids for aids in grouped_aids if len(aids) > 1]
unixtimes_list = ibs.unflat_map(ibs.get_annot_image_unixtimes_asfloat, grouped_aids)
yaws_list = ibs.unflat_map(ibs.get_annot_yaws, grouped_aids)
#markers_list = [[(1, 2, yaw * 360 / (np.pi * 2)) for yaw in yaws] for yaws in yaws_list]
unixtime_list = ut.flatten(unixtimes_list)
timemax = np.nanmax(unixtime_list)
timemin = np.nanmin(unixtime_list)
timerange = timemax - timemin
unixtimes_list = [((unixtimes[:] - timemin) / timerange) for unixtimes in unixtimes_list]
for unixtimes in unixtimes_list:
num_nan = sum(np.isnan(unixtimes))
unixtimes[np.isnan(unixtimes)] = np.linspace(-1, -.5, num_nan)
#ydata_list = [np.arange(len(aids)) for aids in grouped_aids]
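    # Sort each name's annotations chronologically; every name then becomes one
    # column of points (x = name index, y = normalised capture time).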
sortx_list = vt.argsort_groups(unixtimes_list, reverse=False)
#markers_list = ut.list_ziptake(markers_list, sortx_list)
yaws_list = ut.list_ziptake(yaws_list, sortx_list)
ydatas_list = vt.ziptake(unixtimes_list, sortx_list)
#ydatas_list = sortx_list
#ydatas_list = vt.argsort_groups(unixtimes_list, reverse=False)
# Sort by num members
#ydatas_list = ut.take(ydatas_list, np.argsort(list(map(len, ydatas_list))))
xdatas_list = [np.zeros(len(ydatas)) + count for count, ydatas in enumerate(ydatas_list)]
#markers = ut.flatten(markers_list)
#yaws = np.array(ut.flatten(yaws_list))
y_data = np.array(ut.flatten(ydatas_list))
x_data = np.array(ut.flatten(xdatas_list))
fnum = pt.ensure_fnum(fnum)
pt.figure(fnum=fnum)
ax = pt.gca()
#unique_yaws, groupxs = vt.group_indices(yaws)
ax.scatter(x_data, y_data, color=[1, 0, 0], s=1, marker='.')
#pt.draw_stems(x_data, y_data, marker=markers, setlims=True, linestyle='')
pt.dark_background()
ax = pt.gca()
ax.set_xlim(min(x_data) - .1, max(x_data) + .1)
ax.set_ylim(min(y_data) - .1, max(y_data) + .1)
def show_image_time_distributions(ibs, gid_list):
r"""
Args:
ibs (IBEISController): ibeis controller object
gid_list (list):
CommandLine:
python -m ibeis.other.dbinfo --exec-show_image_time_distributions --show
python -m ibeis.other.dbinfo --exec-show_image_time_distributions --show --db lynx
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.other.dbinfo import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> aids = ibeis.testdata_aids(ibs=ibs)
>>> gid_list = ut.unique_unordered(ibs.get_annot_gids(aids))
>>> result = show_image_time_distributions(ibs, gid_list)
>>> print(result)
>>> ut.show_if_requested()
"""
unixtime_list = ibs.get_image_unixtime(gid_list)
    unixtime_list = np.array(unixtime_list, dtype=float)
unixtime_list = ut.list_replace(unixtime_list, -1, float('nan'))
show_time_distributions(ibs, unixtime_list)
def show_time_distributions(ibs, unixtime_list):
r"""
"""
#import vtool as vt
import plottool as pt
unixtime_list = np.array(unixtime_list)
num_nan = np.isnan(unixtime_list).sum()
num_total = len(unixtime_list)
unixtime_list = unixtime_list[~np.isnan(unixtime_list)]
if False:
from matplotlib import dates as mpldates
#data_list = list(map(ut.unixtime_to_datetimeobj, unixtime_list))
n, bins, patches = pt.plt.hist(unixtime_list, 365)
#n_ = list(map(ut.unixtime_to_datetimeobj, n))
#bins_ = list(map(ut.unixtime_to_datetimeobj, bins))
pt.plt.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
ax = pt.gca()
#ax.xaxis.set_major_locator(mpldates.YearLocator())
#hfmt = mpldates.DateFormatter('%y/%m/%d')
#ax.xaxis.set_major_formatter(hfmt)
mpldates.num2date(unixtime_list)
#pt.gcf().autofmt_xdate()
#y = pt.plt.normpdf( bins, unixtime_list.mean(), unixtime_list.std())
#ax.set_xticks(bins_)
#l = pt.plt.plot(bins_, y, 'k--', linewidth=1.5)
else:
pt.draw_time_distribution(unixtime_list)
#pt.draw_histogram()
ax = pt.gca()
ax.set_xlabel('Date')
ax.set_title('Timestamp distribution of %s. #nan=%d/%d' % (
ibs.get_dbname_alias(),
num_nan, num_total))
pt.gcf().autofmt_xdate()
icon = ibs.get_database_icon()
if icon is not None:
#import matplotlib as mpl
#import vtool as vt
ax = pt.gca()
# Overlay a species icon
# http://matplotlib.org/examples/pylab_examples/demo_annotation_box.html
#icon = vt.convert_image_list_colorspace([icon], 'RGB', 'BGR')[0]
pt.overlay_icon(icon, coords=(0, 1), bbox_alignment=(0, 1))
#imagebox = mpl.offsetbox.OffsetImage(icon, zoom=1.0)
##xy = [ax.get_xlim()[0] + 5, ax.get_ylim()[1]]
##ax.set_xlim(1, 100)
##ax.set_ylim(0, 100)
##x = np.array(ax.get_xlim()).sum() / 2
##y = np.array(ax.get_ylim()).sum() / 2
##xy = [x, y]
##print('xy = %r' % (xy,))
##x = np.nanmin(unixtime_list)
##xy = [x, y]
##print('xy = %r' % (xy,))
##ax.get_ylim()[0]]
#xy = [ax.get_xlim()[0], ax.get_ylim()[1]]
#ab = mpl.offsetbox.AnnotationBbox(
# imagebox, xy, xycoords='data',
# xybox=(-0., 0.),
# boxcoords="offset points",
# box_alignment=(0, 1), pad=0.0)
#ax.add_artist(ab)
if ut.get_argflag('--contextadjust'):
#pt.adjust_subplots2(left=.08, bottom=.1, top=.9, wspace=.3, hspace=.1)
pt.adjust_subplots2(use_argv=True)
def latex_dbstats(ibs_list, **kwargs):
r"""
Args:
ibs (IBEISController): ibeis controller object
CommandLine:
python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist testdb1
python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist testdb1 --show
python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 testdb1 --show
python -m ibeis.other.dbinfo --exec-latex_dbstats --dblist PZ_Master0 PZ_MTEST GZ_ALL --show
python -m ibeis.other.dbinfo --test-latex_dbstats --dblist GZ_ALL NNP_MasterGIRM_core --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.other.dbinfo import * # NOQA
>>> import ibeis
>>> db_list = ut.get_argval('--dblist', type_=list, default=['testdb1'])
>>> ibs_list = [ibeis.opendb(db=db) for db in db_list]
>>> tabular_str = latex_dbstats(ibs_list)
>>> tabular_cmd = ut.latex_newcommand(ut.latex_sanitize_command_name('DatabaseInfo'), tabular_str)
>>> ut.copy_text_to_clipboard(tabular_cmd)
>>> write_fpath = ut.get_argval('--write', type_=str, default=None)
>>> if write_fpath is not None:
>>> fpath = ut.truepath(write_fpath)
>>> text = ut.readfrom(fpath)
>>> new_text = ut.replace_between_tags(text, tabular_cmd, '% <DBINFO>', '% </DBINFO>')
>>> ut.writeto(fpath, new_text)
>>> ut.print_code(tabular_cmd, 'latex')
>>> ut.quit_if_noshow()
>>> ut.render_latex_text('\\noindent \n' + tabular_str)
"""
import ibeis
# Parse for aids test data
aids_list = [ibeis.testdata_aids(ibs=ibs) for ibs in ibs_list]
#dbinfo_list = [get_dbinfo(ibs, with_contrib=False, verbose=False) for ibs in ibs_list]
dbinfo_list = [get_dbinfo(ibs, with_contrib=False, verbose=False, aid_list=aids)
for ibs, aids in zip(ibs_list, aids_list)]
#title = db_name + ' database statistics'
title = 'Database statistics'
stat_title = '# Annotations per name (multiton)'
#col_lbls = [
# 'multiton',
# #'singleton',
# 'total',
# 'multiton',
# 'singleton',
# 'total',
#]
key_to_col_lbls = {
'num_names_multiton': 'multiton',
'num_names_singleton': 'singleton',
'num_names': 'total',
'num_multiton_annots': 'multiton',
'num_singleton_annots': 'singleton',
'num_unknown_annots': 'unknown',
'num_annots': 'total',
}
# Structure of columns / multicolumns
multi_col_keys = [
('# Names', (
'num_names_multiton',
#'num_names_singleton',
'num_names',
)),
('# Annots', (
'num_multiton_annots',
'num_singleton_annots',
#'num_unknown_annots',
'num_annots')),
]
#multicol_lbls = [('# Names', 3), ('# Annots', 3)]
multicol_lbls = [(mcolname, len(mcols)) for mcolname, mcols in multi_col_keys]
# Flatten column labels
col_keys = ut.flatten(ut.get_list_column(multi_col_keys, 1))
col_lbls = ut.dict_take(key_to_col_lbls, col_keys)
row_lbls = []
row_values = []
#stat_col_lbls = ['max', 'min', 'mean', 'std', 'nMin', 'nMax']
stat_col_lbls = ['max', 'min', 'mean', 'std', 'med']
#stat_row_lbls = ['# Annot per Name (multiton)']
stat_row_lbls = []
stat_row_values = []
SINGLE_TABLE = False
EXTRA = True
for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
row_ = ut.dict_take(dbinfo_locals, col_keys)
dbname = ibs.get_dbname_alias()
row_lbls.append(dbname)
multiton_annot_stats = ut.get_stats(dbinfo_locals['multiton_nid2_nannots'], use_median=True)
stat_rows = ut.dict_take(multiton_annot_stats, stat_col_lbls)
if SINGLE_TABLE:
row_.extend(stat_rows)
else:
stat_row_lbls.append(dbname)
stat_row_values.append(stat_rows)
row_values.append(row_)
CENTERLINE = False
AS_TABLE = True
tablekw = dict(
astable=AS_TABLE, centerline=CENTERLINE, FORCE_INT=False, precision=2,
col_sep='', multicol_sep='|',
**kwargs)
if EXTRA:
extra_keys = [
#'species2_nAids',
'qualtext2_nAnnots',
'yawtext2_nAnnots',
]
extra_titles = {
'species2_nAids': 'Annotations per species.',
'qualtext2_nAnnots': 'Annotations per quality.',
'yawtext2_nAnnots': 'Annotations per viewpoint.',
}
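        # Build one extra table per key: column labels are the union of values seen
        # across databases (quality and viewpoint use fixed, ordered label sets) and
        # each database contributes one row of counts.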
extra_collbls = ut.ddict(list)
extra_rowvalues = ut.ddict(list)
extra_tables = ut.ddict(list)
for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
for key in extra_keys:
extra_collbls[key] = ut.unique_ordered(extra_collbls[key] + list(dbinfo_locals[key].keys()))
extra_collbls['qualtext2_nAnnots'] = ['excellent', 'good', 'ok', 'poor', 'junk', 'UNKNOWN']
#extra_collbls['yawtext2_nAnnots'] = ['backleft', 'left', 'frontleft', 'front', 'frontright', 'right', 'backright', 'back', None]
extra_collbls['yawtext2_nAnnots'] = ['BL', 'L', 'FL', 'F', 'FR', 'R', 'BR', 'B', None]
for ibs, dbinfo_locals in zip(ibs_list, dbinfo_list):
for key in extra_keys:
extra_rowvalues[key].append(ut.dict_take(dbinfo_locals[key], extra_collbls[key], 0))
qualalias = {'UNKNOWN': None}
extra_collbls['yawtext2_nAnnots'] = [ibs.const.YAWALIAS.get(val, val) for val in extra_collbls['yawtext2_nAnnots']]
extra_collbls['qualtext2_nAnnots'] = [qualalias.get(val, val) for val in extra_collbls['qualtext2_nAnnots']]
for key in extra_keys:
extra_tables[key] = ut.util_latex.make_score_tabular(
row_lbls, extra_collbls[key], extra_rowvalues[key],
title=extra_titles[key], col_align='r', table_position='[h!]', **tablekw)
#tabular_str = util_latex.tabular_join(tabular_body_list)
if SINGLE_TABLE:
col_lbls += stat_col_lbls
multicol_lbls += [(stat_title, len(stat_col_lbls))]
count_tabular_str = ut.util_latex.make_score_tabular(
row_lbls, col_lbls, row_values, title=title, multicol_lbls=multicol_lbls, table_position='[ht!]', **tablekw)
#print(row_lbls)
if SINGLE_TABLE:
tabular_str = count_tabular_str
else:
stat_tabular_str = ut.util_latex.make_score_tabular(
stat_row_lbls, stat_col_lbls, stat_row_values, title=stat_title,
col_align='r', table_position='[h!]', **tablekw)
# Make a table of statistics
if tablekw['astable']:
tablesep = '\n%--\n'
else:
tablesep = '\\\\\n%--\n'
if EXTRA:
tabular_str = tablesep.join([count_tabular_str, stat_tabular_str] + ut.dict_take(extra_tables, extra_keys))
else:
tabular_str = tablesep.join([count_tabular_str, stat_tabular_str])
return tabular_str
def get_short_infostr(ibs):
""" Returns printable database information
Args:
ibs (IBEISController): ibeis controller object
Returns:
str: infostr
CommandLine:
python -m ibeis.other.dbinfo --test-get_short_infostr
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.other.dbinfo import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb('testdb1')
>>> infostr = get_short_infostr(ibs)
>>> result = str(infostr)
>>> print(result)
dbname = 'testdb1'
num_images = 13
num_annotations = 13
num_names = 7
"""
dbname = ibs.get_dbname()
#workdir = ut.unixpath(ibs.get_workdir())
num_images = ibs.get_num_images()
num_annotations = ibs.get_num_annotations()
num_names = ibs.get_num_names()
#workdir = %r
infostr = ut.codeblock('''
dbname = %s
num_images = %r
num_annotations = %r
num_names = %r
''' % (ut.repr2(dbname), num_images, num_annotations, num_names))
return infostr
def test_name_consistency(ibs):
"""
Example:
>>> import ibeis
>>> ibs = ibeis.opendb(db='PZ_Master0')
>>> #ibs = ibeis.opendb(db='GZ_ALL')
"""
from ibeis import ibsfuncs
import utool as ut
max_ = -1
#max_ = 10
valid_aids = ibs.get_valid_aids()[0:max_]
valid_nids = ibs.get_valid_nids()[0:max_]
ax2_nid = ibs.get_annot_name_rowids(valid_aids)
nx2_aids = ibs.get_name_aids(valid_nids)
print('len(valid_aids) = %r' % (len(valid_aids),))
print('len(valid_nids) = %r' % (len(valid_nids),))
print('len(ax2_nid) = %r' % (len(ax2_nid),))
print('len(nx2_aids) = %r' % (len(nx2_aids),))
# annots are grouped by names, so mapping aid back to nid should
# result in each list having the same value
_nids_list = ibsfuncs.unflat_map(ibs.get_annot_name_rowids, nx2_aids)
print(_nids_list[-20:])
print(nx2_aids[-20:])
assert all(map(ut.allsame, _nids_list))
def print_feature_info(testres):
"""
    Prints keypoint statistics (number and scale of keypoints) for each test configuration
Args:
testres (ibeis.expt.test_result.TestResult): test result
Ignore:
import plottool as pt
pt.qt4ensure()
testres.draw_rank_cdf()
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.other.dbinfo import * # NOQA
>>> import ibeis
>>> ibs, testres = ibeis.testdata_expts(defaultdb='PZ_MTEST', a='timectrl', t='invar:AI=False')
>>> (tex_nKpts, tex_kpts_stats, tex_scale_stats) = feature_info(ibs)
>>> result = ('(tex_nKpts, tex_kpts_stats, tex_scale_stats) = %s' % (ut.repr2((tex_nKpts, tex_kpts_stats, tex_scale_stats)),))
>>> print(result)
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> ut.show_if_requested()
"""
import vtool as vt
#ibs = testres.ibs
def print_feat_stats(kpts, vecs):
assert len(vecs) == len(kpts), 'disagreement'
print('keypoints and vecs agree')
flat_kpts = np.vstack(kpts)
num_kpts = list(map(len, kpts))
kpt_scale = vt.get_scales(flat_kpts)
num_kpts_stats = ut.get_stats(num_kpts)
scale_kpts_stats = ut.get_stats(kpt_scale)
print('Number of ' + prefix + ' keypoints: ' + ut.repr3(num_kpts_stats, nl=0, precision=2))
print('Scale of ' + prefix + ' keypoints: ' + ut.repr3(scale_kpts_stats, nl=0, precision=2))
for cfgx in range(testres.nConfig):
print('------------------')
ut.colorprint(testres.cfgx2_lbl[cfgx], 'yellow')
qreq_ = testres.cfgx2_qreq_[cfgx]
depc = qreq_.ibs.depc_annot
tablename = 'feat'
prefix_list = ['query', 'data']
config_pair = [qreq_.query_config2_, qreq_.data_config2_]
aids_pair = [qreq_.qaids, qreq_.daids]
for prefix, aids, config in zip(prefix_list, aids_pair, config_pair):
config_ = depc._ensure_config(tablename, config)
ut.colorprint(prefix + ' Config: ' + str(config_), 'blue')
# Get keypoints and SIFT descriptors for this config
kpts = depc.get(tablename, aids, 'kpts', config=config_)
vecs = depc.get(tablename, aids, 'vecs', config=config_)
# Check various stats of these pairs
print_feat_stats(kpts, vecs)
#kpts = np.vstack(cx2_kpts)
#print('[dbinfo] --- LaTeX --- ')
##_printopts = np.get_printoptions()
##np.set_printoptions(precision=3)
#scales = np.array(sorted(scales))
#tex_scale_stats = util_latex.latex_get_stats(r'kpt scale', scales)
#tex_nKpts = util_latex.latex_scalar(r'\# kpts', len(kpts))
#tex_kpts_stats = util_latex.latex_get_stats(r'\# kpts/chip', cx2_nFeats)
#print(tex_nKpts)
#print(tex_kpts_stats)
#print(tex_scale_stats)
##np.set_printoptions(**_printopts)
#print('[dbinfo] ---/LaTeX --- ')
#return (tex_nKpts, tex_kpts_stats, tex_scale_stats)
def cache_memory_stats(ibs, cid_list, fnum=None):
print('[dev stats] cache_memory_stats()')
#kpts_list = ibs.get_annot_kpts(cid_list)
#desc_list = ibs.get_annot_vecs(cid_list)
#nFeats_list = map(len, kpts_list)
gx_list = np.unique(ibs.cx2_gx(cid_list))
bytes_map = {
'chip dbytes': [ut.file_bytes(fpath) for fpath in ibs.get_rchip_path(cid_list)],
'img dbytes': [ut.file_bytes(gpath) for gpath in ibs.gx2_gname(gx_list, full=True)],
#'flann dbytes': ut.file_bytes(flann_fpath),
}
byte_units = {
'GB': 2 ** 30,
'MB': 2 ** 20,
'KB': 2 ** 10,
}
tabular_body_list = [
]
convert_to = 'KB'
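    # Express every byte count in the chosen unit; lists are rendered as a LaTeX
    # stats row, scalars as a single LaTeX scalar entry.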
for key, val in six.iteritems(bytes_map):
key2 = key.replace('bytes', convert_to)
if isinstance(val, list):
val2 = [bytes_ / byte_units[convert_to] for bytes_ in val]
tex_str = util_latex.latex_get_stats(key2, val2)
else:
val2 = val / byte_units[convert_to]
tex_str = util_latex.latex_scalar(key2, val2)
tabular_body_list.append(tex_str)
tabular = util_latex.tabular_join(tabular_body_list)
print(tabular)
util_latex.render(tabular)
if fnum is None:
fnum = 0
return fnum + 1
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.other.dbinfo
python -m ibeis.other.dbinfo --allexamples
python -m ibeis.other.dbinfo --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 |
chrsrds/scikit-learn | examples/linear_model/plot_logistic_path.py | 20 | 2278 | #!/usr/bin/env python
"""
==============================================
Regularization path of L1- Logistic Regression
==============================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least regularized. The 4
coefficients of the models are collected and plotted as a "regularization
path": on the left-hand side of the figure (strong regularizers), all the
coefficients are exactly 0. When regularization gets progressively looser,
coefficients can get non-zero values one after the other.
Here we choose the SAGA solver because it can efficiently optimize for the
Logistic Regression loss with a non-smooth, sparsity inducing l1 penalty.
Also note that we set a low value for the tolerance to make sure that the model
has converged before collecting the coefficients.
We also use warm_start=True which means that the coefficients of the models are
reused to initialize the next model fit to speed-up the computation of the
full-path.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X /= X.max() # Normalize X to speed-up convergence
# #############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 7, 16)
print("Computing regularization path ...")
start = time()
clf = linear_model.LogisticRegression(penalty='l1', solver='saga',
tol=1e-6, max_iter=int(1e6),
warm_start=True)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took %0.3fs" % (time() - start))
coefs_ = np.array(coefs_)
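# Optional check (a small addition to this example): count how many coefficients are
# exactly zero at each C, which makes the sparsity along the l1 path explicit.
for c, coef in zip(cs, coefs_):
    print("C=%.4e -> %d/%d coefficients exactly zero" % (c, np.sum(coef == 0), coef.size))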
plt.plot(np.log10(cs), coefs_, marker='o')
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
BhavyaLight/kaggle-predicting-Red-Hat-Business-Value | Initial_Classification_Models/Ensemble/VotingClassifier.py | 1 | 1176 | import pandas as pd
from Classification import Utility
# test_dataset = Utility.loadModel("../Final/test_randomforest")
# test_dataset.set_index(["activity_id"]).drop('act_0')
test_dataset = pd.read_csv("../../Data/Outputs/Best/randomForest500Model_new.csv")
test_dataset = test_dataset[["activity_id", "outcome"]]
test_dataset["outcome_RF"] = test_dataset["outcome"]
# print(len(test_dataset["outcome_RF"]))
xgb = pd.read_csv("../../Data/XGB.csv")
xgb["out_xgb"] = xgb["outcome"]
lr = pd.read_csv("../../Data/LR.csv")
lr["out_lr"] = lr["outcome"]
manipulation = pd.read_csv("../../Data/Outputs/manipulation.csv")
manipulation["out_man"] = manipulation["outcome"]
# print(len(lr["out_lr"]))
output = pd.merge(xgb, lr, on="activity_id")
output = pd.merge(test_dataset, output, on="activity_id")
output = pd.merge(output, manipulation, on='activity_id')
# print(output)
print(output.columns)
output["outcome"] = (0.60*output["out_xgb"] + 0.35*output["outcome_RF"] + 0.05*output["out_lr"])
output.set_index(["activity_id"])
# output.loc[len(output)] = ["act_0", "act_0", "act_0", "act_0", "act_0", "act_0"]
Utility.saveInOutputForm(output, "60XGB_35rf_5lr.csv", "ensemble") | mit |
saskartt/kandi | plotReductionCoefficients.py | 1 | 2137 | #!/usr/bin/env python
import sys
import argparse
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from kandiLib import *
from settings import *
'''
Calculate and plot velocity reduction coefficients (following M. Hefny Salim et al / J. Wind. Eng. Ind. Aerodyn. 144 (2015) 84-95)
'''
#==========================================================#
parser = argparse.ArgumentParser(prog='plotReductionCoefficients.py',
                                 description='''Plot time-averaged reduction coefficients for every grid point and compare them against a reference (tree-free) case.''')
parser.add_argument("-f", "--file", type=str, help="Name of the input netCDF4 file.")
parser.add_argument("-c", "--compare", type=str, help="A netCDF4 file to compare to.")
parser.add_argument("-var", "--variable", type=str, default="w", help="A variable to be processed.")
parser.add_argument("-x", "--xlims", type=float, nargs=2, help="Clip the mask into specified limits in x-direction.")
parser.add_argument("-y", "--ylims", type=float, nargs=2, help="Clip the mask into specified limits in y-direction.")
parser.add_argument("-z", "--zlims", type=float, nargs=2, help="Clip the mask into specified limits in z-direction.")
args = parser.parse_args()
#==========================================================#
# Read in the tree-free case
cds = openDataSet(args.compare)
carr, cx_dims, cy_dims, cz_dims = readVariableFromMask(cds, skip_time_avg, args.variable)
print(np.shape(carr))
# Clip desired domain
carr = clipMask(carr, cx_dims, cy_dims, cz_dims, args.xlims, args.ylims, args.zlims)
carr = calculateTemporalStatistics(carr, "avg")
carr = carr.flatten()
# Read dataset(s)
ds = openDataSet(args.file)
arr, x_dims, y_dims, z_dims = readVariableFromMask(ds, skip_time_avg, args.variable)
arr = clipMask(arr, x_dims, y_dims, z_dims, args.xlims, args.ylims, args.zlims)
arr = calculateTemporalStatistics(arr, "avg")
arr = arr.flatten()
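# Optional summary (a simple sketch added here, not a quantity defined in the cited
# paper): aggregate the point-wise reduction into a single number, the least-squares
# slope of the modified velocities against the reference velocities through the origin.
valid = np.isfinite(carr) & np.isfinite(arr)
slope = np.sum(carr[valid] * arr[valid]) / np.sum(carr[valid]**2)
print("Least-squares slope (modified vs. reference): {:.3f}".format(slope))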
fig = plt.figure()
ax = plt.gca()
plt.scatter(carr,arr)
ax.axhline(y=0, color='k')
ax.axvline(x=0, color='k')
ax.set_xlim([-0.4, 0.4])
ax.set_ylim([-0.4, 0.4])
plt.plot(np.linspace(-0.4,0.4),np.linspace(-0.4,0.4))
plt.show()
| mit |
soerendip42/rdkit | rdkit/Chem/Draw/UnitTestSimilarityMaps.py | 3 | 5002 | # $Id$
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Sereina Riniker, Aug 2013
""" unit testing code for molecule drawing
"""
from rdkit import RDConfig
import unittest,os,tempfile
from rdkit import Chem
from rdkit.Chem import Draw
try:
from rdkit.Chem.Draw import SimilarityMaps as sm
except ImportError:
sm = None
from rdkit.RDLogger import logger
logger = logger()
class TestCase(unittest.TestCase):
def setUp(self):
self.mol1 = Chem.MolFromSmiles('c1ccccc1')
self.mol2 = Chem.MolFromSmiles('c1ccncc1')
def testSimilarityMap(self):
# Morgan2 BV
refWeights = [0.5, 0.5, 0.5, -0.5, 0.5, 0.5]
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, radius=2, fpType='bv'))
for w,r in zip(weights, refWeights): self.assertEqual(w, r)
fig, maxWeight = sm.GetSimilarityMapForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, radius=2, fpType='bv'))
self.assertEqual(maxWeight, 0.5)
weights, maxWeight = sm.GetStandardizedWeights(weights)
self.assertEqual(maxWeight, 0.5)
refWeights = [1.0, 1.0, 1.0, -1.0, 1.0, 1.0]
for w,r in zip(weights, refWeights): self.assertEqual(w, r)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, fpType='count'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, fpType='bv', useFeatures=True))
self.assertTrue(weights[3] < 0)
# hashed AP BV
refWeights = [0.09523, 0.17366, 0.17366, -0.23809, 0.17366, 0.17366]
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='bv', nBits=1024))
for w,r in zip(weights, refWeights): self.assertAlmostEqual(w, r, 4)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='normal'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='hashed'))
self.assertTrue(weights[3] < 0)
# hashed TT BV
refWeights = [0.5, 0.5, -0.16666, -0.5, -0.16666, 0.5]
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetTTFingerprint(m, i, fpType='bv', nBits=1024, nBitsPerEntry=1))
for w,r in zip(weights, refWeights): self.assertAlmostEqual(w, r, 4)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetTTFingerprint(m, i, fpType='normal'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetTTFingerprint(m, i, fpType='hashed'))
self.assertTrue(weights[3] < 0)
# RDK fingerprint BV
refWeights = [0.42105, 0.42105, 0.42105, -0.32895, 0.42105, 0.42105]
weights = sm.GetAtomicWeightsForFingerprint(self.mol1, self.mol2, lambda m, i: sm.GetRDKFingerprint(m, i, nBits=1024, nBitsPerHash=1))
for w,r in zip(weights, refWeights): self.assertAlmostEqual(w, r, 4)
if __name__ == '__main__':
try:
import matplotlib
from rdkit.Chem.Draw.mplCanvas import Canvas
except ImportError:
pass
except RuntimeError: # happens with GTK can't initialize
pass
else:
unittest.main()
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
apevec/RMS | Utils/StackFFs.py | 2 | 7879 | """ Stacks all maxpixels in the given folder to one image. """
from __future__ import print_function, division, absolute_import
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from RMS.Formats.FFfile import read as readFF
from RMS.Formats.FFfile import validFFName
from RMS.Routines.Image import deinterlaceBlend, blendLighten, loadFlat, applyFlat, adjustLevels, saveImage
from RMS.Routines import MaskImage
def stackFFs(dir_path, file_format, deinterlace=False, subavg=False, filter_bright=False, flat_path=None,
file_list=None, mask=None):
""" Stack FF files in the given folder.
Arguments:
dir_path: [str] Path to the directory with FF files.
file_format: [str] Image format for the stack. E.g. jpg, png, bmp
Keyword arguments:
        deinterlace: [bool] True if the image should be deinterlaced prior to stacking. False by default.
        subavg: [bool] Whether the average pixel image should be subtracted from the max pixel image. False
            by default.
        filter_bright: [bool] Whether images with bright backgrounds (after average subtraction) should be
            skipped. False by default.
flat_path: [str] Path to the flat calibration file. None by default. Will only be used if subavg is
False.
        file_list: [list] A list of files for stacking. None by default, in which case all FF files in the
            given directory will be used.
mask: [MaskStructure] Mask to apply to the stack. None by default.
Return:
stack_path, merge_img:
            - stack_path: [str] Path of the saved stack.
- merge_img: [ndarray] Numpy array of the stacked image.
"""
# Load the flat if it was given
flat = None
if flat_path != '':
# Try finding the default flat
if flat_path is None:
flat_path = dir_path
flat_file = 'flat.bmp'
else:
flat_path, flat_file = os.path.split(flat_path)
flat_full_path = os.path.join(flat_path, flat_file)
if os.path.isfile(flat_full_path):
# Load the flat
flat = loadFlat(flat_path, flat_file)
print('Loaded flat:', flat_full_path)
first_img = True
n_stacked = 0
total_ff_files = 0
merge_img = None
# If the list of files was not given, take all files in the given folder
if file_list is None:
file_list = sorted(os.listdir(dir_path))
# List all FF files in the current dir
for ff_name in file_list:
if validFFName(ff_name):
# Load FF file
ff = readFF(dir_path, ff_name)
            # Skip the file if it is corrupted
if ff is None:
continue
total_ff_files += 1
maxpixel = ff.maxpixel
avepixel = ff.avepixel
            # Deinterlace the images
if deinterlace:
maxpixel = deinterlaceBlend(maxpixel)
avepixel = deinterlaceBlend(avepixel)
# If the flat was given, apply it to the image, only if no subtraction is done
if (flat is not None) and not subavg:
maxpixel = applyFlat(maxpixel, flat)
avepixel = applyFlat(avepixel, flat)
# Reject the image if the median subtracted image is too bright. This usually means that there
# are clouds on the image which can ruin the stack
if filter_bright:
img = maxpixel - avepixel
# Compute surface brightness
median = np.median(img)
# Compute top detection pixels
top_brightness = np.percentile(img, 99.9)
# Reject all images where the median brightness is high
# Preserve images with very bright detections
if (median > 10) and (top_brightness < (2**(8*img.itemsize) - 10)):
print('Skipping: ', ff_name, 'median:', median, 'top brightness:', top_brightness)
continue
# Subtract the average from maxpixel
if subavg:
img = maxpixel - avepixel
else:
img = maxpixel
if first_img:
merge_img = np.copy(img)
first_img = False
n_stacked += 1
continue
print('Stacking: ', ff_name)
# Blend images 'if lighter'
merge_img = blendLighten(merge_img, img)
n_stacked += 1
    # If the number of stacked images is less than 20% of the given images, stack without filtering
if filter_bright and (n_stacked < 0.2*total_ff_files):
return stackFFs(dir_path, file_format, deinterlace=deinterlace, subavg=subavg,
filter_bright=False, flat_path=flat_path, file_list=file_list)
# If no images were stacked, do nothing
if n_stacked == 0:
return None, None
# Extract the name of the night directory which contains the FF files
night_dir = os.path.basename(dir_path)
stack_path = os.path.join(dir_path, night_dir + '_stack_{:d}_meteors.'.format(n_stacked) + file_format)
print("Saving stack to:", stack_path)
# Stretch the levels
merge_img = adjustLevels(merge_img, np.percentile(merge_img, 0.5), 1.3, np.percentile(merge_img, 99.9))
# Apply the mask, if given
if mask is not None:
merge_img = MaskImage.applyMask(merge_img, mask)
# Save the blended image
saveImage(stack_path, merge_img)
return stack_path, merge_img
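# Usage sketch (a hedged addition, not part of the original module; the path below is
# a placeholder): stackFFs can also be called directly from Python, e.g.
#   stack_path, stack = stackFFs('/path/to/CapturedFiles/night_dir', 'jpg',
#                                subavg=True, filter_bright=True)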
if __name__ == '__main__':
### COMMAND LINE ARGUMENTS
# Init the command line arguments parser
    arg_parser = argparse.ArgumentParser(description="Stacks all maxpixels in the given folder to one image.")
arg_parser.add_argument('dir_path', nargs=1, metavar='DIR_PATH', type=str, \
help='Path to directory with FF files.')
arg_parser.add_argument('file_format', nargs=1, metavar='FILE_FORMAT', type=str, \
help='File format of the image, e.g. jpg or png.')
arg_parser.add_argument('-d', '--deinterlace', action="store_true", \
help="""Deinterlace the image before stacking. """)
arg_parser.add_argument('-s', '--subavg', action="store_true", \
help="""Subtract the average image from maxpixel before stacking. """)
arg_parser.add_argument('-b', '--brightfilt', action="store_true", \
help="""Rejects images with very bright background, which are often clouds. """)
arg_parser.add_argument('-x', '--hideplot', action="store_true", \
help="""Don't show the stack on the screen after stacking. """)
arg_parser.add_argument('-f', '--flat', nargs='?', metavar='FLAT_PATH', type=str, default='',
help="Apply a given flat frame. If no path to the flat is given, flat.bmp from the folder will be taken.")
arg_parser.add_argument('-m', '--mask', metavar='MASK_PATH', type=str,
help="Apply a given mask. If no path to the mask is given, mask.bmp from the folder will be taken.")
# Parse the command line arguments
cml_args = arg_parser.parse_args()
#########################
# Load the mask
mask = None
if cml_args.mask is not None:
if os.path.exists(cml_args.mask):
mask_path = os.path.abspath(cml_args.mask)
print('Loading mask:', mask_path)
mask = MaskImage.loadMask(mask_path)
# Run stacking
stack_path, merge_img = stackFFs(cml_args.dir_path[0], cml_args.file_format[0], \
deinterlace=cml_args.deinterlace, subavg=cml_args.subavg, filter_bright=cml_args.brightfilt, \
flat_path=cml_args.flat, mask=mask)
if not cml_args.hideplot:
# Plot the blended image
plt.imshow(merge_img, cmap='gray', vmin=0, vmax=255)
plt.show()
| gpl-3.0 |
clemkoa/scikit-learn | sklearn/utils/tests/test_utils.py | 27 | 9605 | from itertools import chain, product
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from scipy.sparse.csgraph import laplacian
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal, ignore_warnings)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.arpack import eigsh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1],
replace=False, n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
# Issue #6581: n_samples can be larger than the input when replace is True (default).
assert_equal(len(resample([1, 2], n_samples=5)), 5)
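# Usage sketch (illustrative only, not part of the original test suite):
# resample draws a bootstrap sample (with replacement by default) and keeps
# paired arrays aligned row-for-row; the demo arrays below are placeholders.
X_demo = np.arange(10).reshape(5, 2)
y_demo = np.array([0, 1, 0, 1, 0])
X_boot, y_boot = resample(X_demo, y_demo, n_samples=5, random_state=0)
assert len(X_boot) == len(y_boot) == 5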
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
@ignore_warnings # Test deprecated backport to be removed in 0.21
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
@ignore_warnings # Test deprecated backport to be removed in 0.21
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
@ignore_warnings # Test deprecated backport to be removed in 0.21
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
@ignore_warnings # Test deprecated backport to be removed in 0.21
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = laplacian(A) + 1e-7 * np.identity(A.shape[0])
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
v0 = random_state.uniform(-1, 1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
inds_readonly = inds.copy()
inds_readonly.setflags(write=False)
for this_df, this_inds in product([X_df, X_df_readonly],
[inds, inds_readonly]):
with warnings.catch_warnings(record=True):
X_df_indexed = safe_indexing(this_df, this_inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert to numpy arrays with float
# dtypes and lets any indexable data structure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in
gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing a negative number of packs raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
cmoutard/mne-python | mne/io/base.py | 1 | 93193 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Martin Luessi <[email protected]>
# Denis Engemann <[email protected]>
# Teon Brooks <[email protected]>
# Marijn van Vliet <[email protected]>
#
# License: BSD (3-clause)
import copy
from copy import deepcopy
import warnings
import os
import os.path as op
import numpy as np
from scipy import linalg
from .constants import FIFF
from .pick import pick_types, channel_type, pick_channels, pick_info
from .meas_info import write_meas_info
from .proj import setup_proj, activate_proj, _proj_equal, ProjMixin
from ..channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from ..channels.montage import read_montage, _set_montage, Montage
from .compensator import set_current_comp
from .write import (start_file, end_file, start_block, end_block,
write_dau_pack16, write_float, write_double,
write_complex64, write_complex128, write_int,
write_id, write_string, _get_split_size)
from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,
notch_filter, band_stop_filter, resample,
_resample_stim_channels)
from ..fixes import in1d
from ..parallel import parallel_func
from ..utils import (_check_fname, _check_pandas_installed,
_check_pandas_index_arguments,
check_fname, _get_stim_channel, object_hash,
logger, verbose, _time_mask)
from ..viz import plot_raw, plot_raw_psd, plot_raw_psd_topo
from ..defaults import _handle_default
from ..externals.six import string_types
from ..event import find_events, concatenate_events
class ToDataFrameMixin(object):
'''Class to add to_data_frame capabilities to certain classes.'''
def _get_check_picks(self, picks, picks_check):
if picks is None:
picks = list(range(self.info['nchan']))
else:
if not in1d(picks, np.arange(len(picks_check))).all():
raise ValueError('At least one picked channel is not present '
'in this object instance.')
return picks
def to_data_frame(self, picks=None, index=None, scale_time=1e3,
scalings=None, copy=True, start=None, stop=None):
"""Export data in tabular structure as a pandas DataFrame.
Columns and indices will depend on the object being converted.
Generally this will include as much relevant information as
possible for the data type being converted. This makes it easy
to convert data for use in packages that utilize dataframes,
such as statsmodels or seaborn.
Parameters
----------
picks : array-like of int | None
If None only MEG and EEG channels are kept
otherwise the channels indices in picks are kept.
index : tuple of str | None
Column to be used as index for the data. Valid string options
are 'epoch', 'time' and 'condition'. If None, all three info
columns will be included in the table as categorical data.
scale_time : float
Scaling to be applied to time units.
scalings : dict | None
Scaling to be applied to the channels picked. If None, defaults to
``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)``.
copy : bool
If true, data will be copied. Else data may be modified in place.
start : int | None
If it is a Raw object, this defines a starting index for creating
the dataframe from a slice. The times will be interpolated from the
index and the sampling rate of the signal.
stop : int | None
If it is a Raw object, this defines a stop index for creating
the dataframe from a slice. The times will be interpolated from the
index and the sampling rate of the signal.
Returns
-------
df : instance of pandas.core.DataFrame
A dataframe suitable for usage with other
statistical/plotting/analysis packages. Column/Index values will
depend on the object type being converted, but should be
human-readable.
"""
from ..epochs import _BaseEpochs
from ..evoked import Evoked
from ..source_estimate import _BaseSourceEstimate
pd = _check_pandas_installed()
mindex = list()
# Treat SourceEstimates special because they don't have the same info
if isinstance(self, _BaseSourceEstimate):
if self.subject is None:
default_index = ['time']
else:
default_index = ['subject', 'time']
data = self.data.T
times = self.times
shape = data.shape
mindex.append(('subject', np.repeat(self.subject, shape[0])))
if isinstance(self.vertices, list):
# surface source estimates
col_names = [i for e in [
['{0} {1}'.format('LH' if ii < 1 else 'RH', vert)
for vert in vertno]
for ii, vertno in enumerate(self.vertices)]
for i in e]
else:
# volume source estimates
col_names = ['VOL {0}'.format(vert) for vert in self.vertices]
elif isinstance(self, (_BaseEpochs, _BaseRaw, Evoked)):
picks = self._get_check_picks(picks, self.ch_names)
if isinstance(self, _BaseEpochs):
default_index = ['condition', 'epoch', 'time']
data = self.get_data()[:, picks, :]
times = self.times
n_epochs, n_picks, n_times = data.shape
data = np.hstack(data).T # (time*epochs) x signals
# Multi-index creation
times = np.tile(times, n_epochs)
id_swapped = dict((v, k) for k, v in self.event_id.items())
names = [id_swapped[k] for k in self.events[:, 2]]
mindex.append(('condition', np.repeat(names, n_times)))
mindex.append(('epoch',
np.repeat(np.arange(n_epochs), n_times)))
col_names = [self.ch_names[k] for k in picks]
elif isinstance(self, (_BaseRaw, Evoked)):
default_index = ['time']
if isinstance(self, _BaseRaw):
data, times = self[picks, start:stop]
elif isinstance(self, Evoked):
data = self.data[picks, :]
times = self.times
n_picks, n_times = data.shape
data = data.T
col_names = [self.ch_names[k] for k in picks]
types = [channel_type(self.info, idx) for idx in picks]
n_channel_types = 0
ch_types_used = []
scalings = _handle_default('scalings', scalings)
for t in scalings.keys():
if t in types:
n_channel_types += 1
ch_types_used.append(t)
for t in ch_types_used:
scaling = scalings[t]
idx = [picks[i] for i in range(len(picks)) if types[i] == t]
if len(idx) > 0:
data[:, idx] *= scaling
else:
# In case some other object gets this mixin w/o an explicit check
raise NameError('Object must be one of Raw, Epochs, Evoked, or ' +
'SourceEstimate. This is {0}'.format(type(self)))
# Make sure that the time index is scaled correctly
times = np.round(times * scale_time)
mindex.append(('time', times))
if index is not None:
_check_pandas_index_arguments(index, default_index)
else:
index = default_index
if copy is True:
data = data.copy()
assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
df = pd.DataFrame(data, columns=col_names)
for i, (k, v) in enumerate(mindex):
df.insert(i, k, v)
if index is not None:
if 'time' in index:
logger.info('Converting time column to int64...')
df['time'] = df['time'].astype(np.int64)
df.set_index(index, inplace=True)
if all(i in default_index for i in index):
df.columns.name = 'signal'
return df
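# Usage sketch (illustrative only, not part of mne/io/base.py): export a
# hypothetical preloaded Raw instance `raw` to a pandas DataFrame indexed by
# time, keeping only the first ten EEG channels.
#
# picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')[:10]
# df = raw.to_data_frame(picks=picks, index=['time'], scale_time=1e3)
# df.head()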
def _check_fun(fun, d, *args, **kwargs):
want_shape = d.shape
d = fun(d, *args, **kwargs)
if not isinstance(d, np.ndarray):
raise TypeError('Return value must be an ndarray')
if d.shape != want_shape:
raise ValueError('Return data must have shape %s not %s'
% (want_shape, d.shape))
return d
class _BaseRaw(ProjMixin, ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin, ToDataFrameMixin):
"""Base class for Raw data
Subclasses must provide the following methods:
* _read_segment_file(self, data, idx, fi, start, stop, cals, mult)
(only needed for types that support on-demand disk reads)
The `_BaseRaw._raw_extras` list can contain whatever data is necessary for
such on-demand reads. For `RawFIF` this means a list of variables formerly
known as ``_rawdirs``.
"""
@verbose
def __init__(self, info, preload=False,
first_samps=(0,), last_samps=None,
filenames=(None,), raw_extras=(None,),
comp=None, orig_comp_grade=None,
orig_format='double', dtype=np.float64,
verbose=None):
# wait until the end to preload data, but triage here
if isinstance(preload, np.ndarray):
# some functions (e.g., filtering) only work w/64-bit data
if preload.dtype not in (np.float64, np.complex128):
raise RuntimeError('datatype must be float64 or complex128, '
'not %s' % preload.dtype)
if preload.dtype != dtype:
raise ValueError('preload and dtype must match')
self._data = preload
self.preload = True
last_samps = [self._data.shape[1] - 1]
load_from_disk = False
else:
if last_samps is None:
raise ValueError('last_samps must be given unless preload is '
'an ndarray')
if preload is False:
self.preload = False
load_from_disk = False
elif preload is not True and not isinstance(preload, string_types):
raise ValueError('bad preload: %s' % preload)
else:
load_from_disk = True
self._last_samps = np.array(last_samps)
self._first_samps = np.array(first_samps)
info._check_consistency() # make sure subclass did a good job
self.info = info
if info.get('buffer_size_sec', None) is None:
raise RuntimeError('Reader error, notify mne-python developers')
cals = np.empty(info['nchan'])
for k in range(info['nchan']):
cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
self.verbose = verbose
self._cals = cals
self._raw_extras = list(raw_extras)
self.comp = comp
self._orig_comp_grade = orig_comp_grade
self._filenames = list(filenames)
self.orig_format = orig_format
self._projectors = list()
self._projector = None
self._dtype_ = dtype
# If we have True or a string, actually do the preloading
if load_from_disk:
self._preload_data(preload)
self._update_times()
@property
def _dtype(self):
"""dtype for loading data (property so subclasses can override)"""
# most classes only store real data, they won't need anything special
return self._dtype_
def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
projector=None, verbose=None):
"""Read a chunk of raw data
Parameters
----------
start : int, (optional)
first sample to include (first is 0). If omitted, defaults to the
first sample in data.
stop : int, (optional)
First sample to not include.
If omitted, data is included to the end.
sel : array, optional
Indices of channels to select.
data_buffer : array or str, optional
numpy array to fill with data read, must have the correct shape.
If str, a np.memmap with the correct data type will be used
to store the data.
projector : array
SSP operator to apply to the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
data : array, [channels x samples]
the data matrix (channels x samples).
times : array, [samples]
returns the time values corresponding to the samples.
"""
# Initial checks
start = int(start)
stop = self.n_times if stop is None else min([int(stop), self.n_times])
if start >= stop:
raise ValueError('No data in this range')
logger.info('Reading %d ... %d = %9.3f ... %9.3f secs...' %
(start, stop - 1, start / float(self.info['sfreq']),
(stop - 1) / float(self.info['sfreq'])))
# Initialize the data and calibration vector
n_sel_channels = self.info['nchan'] if sel is None else len(sel)
# convert sel to a slice if possible for efficiency
if sel is not None and len(sel) > 1 and np.all(np.diff(sel) == 1):
sel = slice(sel[0], sel[-1] + 1)
idx = slice(None, None, None) if sel is None else sel
data_shape = (n_sel_channels, stop - start)
dtype = self._dtype
if isinstance(data_buffer, np.ndarray):
if data_buffer.shape != data_shape:
raise ValueError('data_buffer has incorrect shape')
data = data_buffer
elif isinstance(data_buffer, string_types):
# use a memmap
data = np.memmap(data_buffer, mode='w+',
dtype=dtype, shape=data_shape)
else:
data = np.zeros(data_shape, dtype=dtype)
# deal with having multiple files accessed by the raw object
cumul_lens = np.concatenate(([0], np.array(self._raw_lengths,
dtype='int')))
cumul_lens = np.cumsum(cumul_lens)
files_used = np.logical_and(np.less(start, cumul_lens[1:]),
np.greater_equal(stop - 1,
cumul_lens[:-1]))
# set up cals and mult (cals, compensation, and projector)
cals = self._cals.ravel()[np.newaxis, :]
if self.comp is not None:
if projector is not None:
mult = self.comp * cals
mult = np.dot(projector[idx], mult)
else:
mult = self.comp[idx] * cals
elif projector is not None:
mult = projector[idx] * cals
else:
mult = None
cals = cals.T[idx]
# read from necessary files
offset = 0
for fi in np.nonzero(files_used)[0]:
start_file = self._first_samps[fi]
# first iteration (only) could start in the middle somewhere
if offset == 0:
start_file += start - cumul_lens[fi]
stop_file = np.min([stop - cumul_lens[fi] + self._first_samps[fi],
self._last_samps[fi] + 1])
if start_file < self._first_samps[fi] or stop_file < start_file:
raise ValueError('Bad array indexing, could be a bug')
n_read = stop_file - start_file
this_sl = slice(offset, offset + n_read)
self._read_segment_file(data[:, this_sl], idx, fi,
int(start_file), int(stop_file),
cals, mult)
offset += n_read
logger.info('[done]')
return data
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a segment of data from a file
Only needs to be implemented for readers that support
``preload=False``.
Parameters
----------
data : ndarray, shape (len(idx), stop - start + 1)
The data array. Should be modified inplace.
idx : ndarray | slice
The requested channel indices.
fi : int
The file index that must be read from.
start : int
The start sample in the given file.
stop : int
The stop sample in the given file (inclusive).
cals : ndarray, shape (len(idx), 1)
Channel calibrations (already sub-indexed).
mult : ndarray, shape (len(idx), len(info['chs']) | None
The compensation + projection + cals matrix, if applicable.
"""
raise NotImplementedError
@verbose
def load_data(self, verbose=None):
"""Load raw data
Parameters
----------
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : instance of Raw
The raw object with data.
Notes
-----
This function will load raw data if it was not already preloaded.
If data were already preloaded, it will do nothing.
.. versionadded:: 0.10.0
"""
if not self.preload:
self._preload_data(True)
return self
def _preload_data(self, preload):
"""This function actually preloads the data"""
data_buffer = preload if isinstance(preload, string_types) else None
self._data = self._read_segment(data_buffer=data_buffer)
assert len(self._data) == self.info['nchan']
self.preload = True
self.close()
def _update_times(self):
"""Helper to update times"""
self._times = np.arange(self.n_times) / float(self.info['sfreq'])
# make it immutable
self._times.flags.writeable = False
@property
def first_samp(self):
return self._first_samps[0]
@property
def last_samp(self):
return self.first_samp + sum(self._raw_lengths) - 1
@property
def _raw_lengths(self):
return [l - f + 1 for f, l in zip(self._first_samps, self._last_samps)]
def __del__(self):
# remove file for memmap
if hasattr(self, '_data') and hasattr(self._data, 'filename'):
# First, close the file out; happens automatically on del
filename = self._data.filename
del self._data
# Now file can be removed
try:
os.remove(filename)
except OSError:
pass # ignore file that no longer exists
def __enter__(self):
""" Entering with block """
return self
def __exit__(self, exception_type, exception_val, trace):
""" Exiting with block """
try:
self.close()
except:
return exception_type, exception_val, trace
def __hash__(self):
if not self.preload:
raise RuntimeError('Cannot hash raw unless preloaded')
return object_hash(dict(info=self.info, data=self._data))
def _parse_get_set_params(self, item):
# make sure item is a tuple
if not isinstance(item, tuple): # only channel selection passed
item = (item, slice(None, None, None))
if len(item) != 2: # should be channels and time instants
raise RuntimeError("Unable to access raw data (need both channels "
"and time)")
if isinstance(item[0], slice):
start = item[0].start if item[0].start is not None else 0
nchan = self.info['nchan']
stop = item[0].stop if item[0].stop is not None else nchan
step = item[0].step if item[0].step is not None else 1
sel = list(range(start, stop, step))
else:
sel = item[0]
if isinstance(item[1], slice):
time_slice = item[1]
start, stop, step = (time_slice.start, time_slice.stop,
time_slice.step)
else:
item1 = item[1]
# Let's do automated type conversion to integer here
if np.array(item[1]).dtype.kind == 'i':
item1 = int(item1)
if isinstance(item1, (int, np.integer)):
start, stop, step = item1, item1 + 1, 1
else:
raise ValueError('Must pass int or slice to __getitem__')
if start is None:
start = 0
if (step is not None) and (step != 1):
raise ValueError('step needs to be 1 : %d given' % step)
if isinstance(sel, (int, np.integer)):
sel = np.array([sel])
if sel is not None and len(sel) == 0:
raise ValueError("Empty channel list")
return sel, start, stop
def __getitem__(self, item):
"""getting raw data content with python slicing"""
sel, start, stop = self._parse_get_set_params(item)
if self.preload:
data = self._data[sel, start:stop]
else:
data = self._read_segment(start=start, stop=stop, sel=sel,
projector=self._projector,
verbose=self.verbose)
times = self.times[start:stop]
return data, times
def __setitem__(self, item, value):
"""setting raw data content with python slicing"""
if not self.preload:
raise RuntimeError('Modifying data of Raw is only supported '
'when preloading is used. Use preload=True '
'(or string) in the constructor.')
sel, start, stop = self._parse_get_set_params(item)
# set the data
self._data[sel, start:stop] = value
def anonymize(self):
"""Anonymize data
This function will remove info['subject_info'] if it exists.
Returns
-------
raw : instance of Raw
The raw object. Operates in place.
"""
self.info._anonymize()
return self
@verbose
def apply_function(self, fun, picks, dtype, n_jobs, *args, **kwargs):
""" Apply a function to a subset of channels.
The function "fun" is applied to the channels defined in "picks". The
data of the Raw object is modified inplace. If the function returns
a different data type (e.g. numpy.complex) it must be specified using
the dtype parameter, which causes the data type used for representing
the raw data to change.
The Raw object has to be constructed using preload=True (or string).
Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
additional time points need to be temporarily stored in memory.
Note: If the data type changes (dtype != None), more memory is required
since the original and the converted data need to be stored in
memory.
Parameters
----------
fun : function
A function to be applied to the channels. The first argument of
fun has to be a timeseries (numpy.ndarray). The function must
return an numpy.ndarray with the same size as the input.
picks : array-like of int | None
Indices of channels to apply the function to. If None, all
M-EEG channels are used.
dtype : numpy.dtype
Data type to use for raw data after applying the function. If None
the data type is not modified.
n_jobs: int
Number of jobs to run in parallel.
*args :
Additional positional arguments to pass to fun (first pos. argument
of fun is the timeseries of a channel).
**kwargs :
Keyword arguments to pass to fun. Note that if "verbose" is passed
as a member of ``kwargs``, it will be consumed and will override
the default mne-python verbose level (see mne.verbose).
"""
if not self.preload:
raise RuntimeError('Raw data needs to be preloaded. Use '
'preload=True (or string) in the constructor.')
if picks is None:
picks = pick_types(self.info, meg=True, eeg=True, exclude=[])
if not callable(fun):
raise ValueError('fun needs to be a function')
data_in = self._data
if dtype is not None and dtype != self._data.dtype:
self._data = self._data.astype(dtype)
if n_jobs == 1:
# modify data inplace to save memory
for idx in picks:
self._data[idx, :] = _check_fun(fun, data_in[idx, :],
*args, **kwargs)
else:
# use parallel function
parallel, p_fun, _ = parallel_func(_check_fun, n_jobs)
data_picks_new = parallel(p_fun(fun, data_in[p], *args, **kwargs)
for p in picks)
for pp, p in enumerate(picks):
self._data[p, :] = data_picks_new[pp]
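# Usage sketch (illustrative only, not part of mne/io/base.py): square every
# EEG channel of a hypothetical preloaded Raw instance `raw` in place. The
# custom function must return an array of the same shape it receives.
#
# def square(x):
#     return x ** 2
#
# eeg_picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
# raw.apply_function(square, picks=eeg_picks, dtype=None, n_jobs=1)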
@verbose
def apply_hilbert(self, picks, envelope=False, n_jobs=1, n_fft=None,
verbose=None):
""" Compute analytic signal or envelope for a subset of channels.
If envelope=False, the analytic signal for the channels defined in
"picks" is computed and the data of the Raw object is converted to
a complex representation (the analytic signal is complex valued).
If envelope=True, the absolute value of the analytic signal for the
channels defined in "picks" is computed, resulting in the envelope
signal.
Note: DO NOT use envelope=True if you intend to compute an inverse
solution from the raw data. If you want to compute the
envelope in source space, use envelope=False and compute the
envelope after the inverse solution has been obtained.
Note: If envelope=False, more memory is required since the original
raw data as well as the analytic signal have temporarily to
be stored in memory.
Note: If n_jobs > 1 and envelope=True, more memory is required as
"len(picks) * n_times" additional time points need to be
temporarily stored in memory.
Parameters
----------
picks : array-like of int
Indices of channels to apply the function to.
envelope : bool (default: False)
Compute the envelope signal of each channel.
n_jobs: int
Number of jobs to run in parallel.
n_fft : int > self.n_times | None
Points to use in the FFT for Hilbert transformation. The signal
will be padded with zeros before computing Hilbert, then cut back
to original length. If None, n == self.n_times.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
The analytic signal "x_a(t)" of "x(t)" is::
x_a = F^{-1}(F(x) 2U) = x + i y
where "F" is the Fourier transform, "U" the unit step function,
and "y" the Hilbert transform of "x". One usage of the analytic
signal is the computation of the envelope signal, which is given by
"e(t) = abs(x_a(t))". Due to the linearity of Hilbert transform and the
MNE inverse solution, the enevlope in source space can be obtained
by computing the analytic signal in sensor space, applying the MNE
inverse, and computing the envelope in source space.
Also note that the n_fft parameter will allow you to pad the signal
with zeros before performing the Hilbert transform. This padding
is cut off, but it may result in a slightly different result
(particularly around the edges). Use at your own risk.
"""
n_fft = self.n_times if n_fft is None else n_fft
if n_fft < self.n_times:
raise ValueError("n_fft must be greater than n_times")
if envelope is True:
self.apply_function(_my_hilbert, picks, None, n_jobs, n_fft,
envelope=envelope)
else:
self.apply_function(_my_hilbert, picks, np.complex64, n_jobs,
n_fft, envelope=envelope)
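# Usage sketch (illustrative only, not part of mne/io/base.py): compute the
# amplitude envelope of the EEG channels of a hypothetical preloaded Raw
# instance `raw`, typically after band-pass filtering to the band of interest.
#
# eeg_picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
# raw.filter(8., 12., picks=eeg_picks)         # isolate the alpha band first
# raw.apply_hilbert(eeg_picks, envelope=True)  # data become the envelope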
@verbose
def filter(self, l_freq, h_freq, picks=None, filter_length='10s',
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, n_jobs=1,
method='fft', iir_params=None, verbose=None):
"""Filter a subset of channels.
Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
filter to the channels selected by "picks". The data of the Raw
object is modified inplace.
The Raw object has to be constructed using preload=True (or string).
l_freq and h_freq are the frequencies below which and above which,
respectively, to filter out of the data. Thus the uses are:
* ``l_freq < h_freq``: band-pass filter
* ``l_freq > h_freq``: band-stop filter
* ``l_freq is not None and h_freq is None``: high-pass filter
* ``l_freq is None and h_freq is not None``: low-pass filter
If n_jobs > 1, more memory is required as "len(picks) * n_times"
additional time points need to be temporarily stored in memory.
self.info['lowpass'] and self.info['highpass'] are only updated
with picks=None.
Parameters
----------
l_freq : float | None
Low cut-off frequency in Hz. If None the data are only low-passed.
h_freq : float | None
High cut-off frequency in Hz. If None the data are only
high-passed.
picks : array-like of int | None
Indices of channels to filter. If None only the data (MEG/EEG)
channels will be filtered.
filter_length : str (Default: '10s') | int | None
Length of the filter to use. If None or "len(x) < filter_length",
the filter length used is len(x). Otherwise, if int, overlap-add
filtering with a filter of the specified length (in samples) is
used (faster for long signals). If str, a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
to the shortest power-of-two length at least that duration.
Not used for 'iir' filters.
l_trans_bandwidth : float
Width of the transition band at the low cut-off frequency in Hz
(high pass or cutoff 1 in bandpass). Not used if 'order' is
specified in iir_params.
h_trans_bandwidth : float
Width of the transition band at the high cut-off frequency in Hz
(low pass or cutoff 2 in bandpass). Not used if 'order' is
specified in iir_params.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly, CUDA is initialized, and method='fft'.
method : str
'fft' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt).
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
See Also
--------
mne.Epochs.savgol_filter
"""
if verbose is None:
verbose = self.verbose
fs = float(self.info['sfreq'])
if l_freq == 0:
l_freq = None
if h_freq is not None and h_freq > (fs / 2.):
h_freq = None
if l_freq is not None and not isinstance(l_freq, float):
l_freq = float(l_freq)
if h_freq is not None and not isinstance(h_freq, float):
h_freq = float(h_freq)
if not self.preload:
raise RuntimeError('Raw data needs to be preloaded to filter. Use '
'preload=True (or string) in the constructor.')
if picks is None:
if 'ICA ' in ','.join(self.ch_names):
pick_parameters = dict(misc=True, ref_meg=False)
else:
pick_parameters = dict(meg=True, eeg=True, ref_meg=False)
picks = pick_types(self.info, exclude=[], **pick_parameters)
# let's be safe.
if len(picks) < 1:
raise RuntimeError('Could not find any valid channels for '
'your Raw object. Please contact the '
'MNE-Python developers.')
# update info if filter is applied to all data channels,
# and it's not a band-stop filter
if h_freq is not None:
if (l_freq is None or l_freq < h_freq) and \
(self.info["lowpass"] is None or
h_freq < self.info['lowpass']):
self.info['lowpass'] = h_freq
if l_freq is not None:
if (h_freq is None or l_freq < h_freq) and \
(self.info["highpass"] is None or
l_freq > self.info['highpass']):
self.info['highpass'] = l_freq
if l_freq is None and h_freq is not None:
logger.info('Low-pass filtering at %0.2g Hz' % h_freq)
low_pass_filter(self._data, fs, h_freq,
filter_length=filter_length,
trans_bandwidth=h_trans_bandwidth, method=method,
iir_params=iir_params, picks=picks, n_jobs=n_jobs,
copy=False)
if l_freq is not None and h_freq is None:
logger.info('High-pass filtering at %0.2g Hz' % l_freq)
high_pass_filter(self._data, fs, l_freq,
filter_length=filter_length,
trans_bandwidth=l_trans_bandwidth, method=method,
iir_params=iir_params, picks=picks, n_jobs=n_jobs,
copy=False)
if l_freq is not None and h_freq is not None:
if l_freq < h_freq:
logger.info('Band-pass filtering from %0.2g - %0.2g Hz'
% (l_freq, h_freq))
self._data = band_pass_filter(
self._data, fs, l_freq, h_freq,
filter_length=filter_length,
l_trans_bandwidth=l_trans_bandwidth,
h_trans_bandwidth=h_trans_bandwidth,
method=method, iir_params=iir_params, picks=picks,
n_jobs=n_jobs, copy=False)
else:
logger.info('Band-stop filtering from %0.2g - %0.2g Hz'
% (h_freq, l_freq))
self._data = band_stop_filter(
self._data, fs, h_freq, l_freq,
filter_length=filter_length,
l_trans_bandwidth=h_trans_bandwidth,
h_trans_bandwidth=l_trans_bandwidth, method=method,
iir_params=iir_params, picks=picks, n_jobs=n_jobs,
copy=False)
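# Usage sketch (illustrative only, not part of mne/io/base.py): common filter
# settings on a hypothetical preloaded Raw instance `raw`, following the
# l_freq/h_freq combinations documented above.
#
# raw.filter(1., 40.)      # band-pass 1-40 Hz (l_freq < h_freq)
# raw.filter(None, 40.)    # low-pass at 40 Hz
# raw.filter(0.1, None)    # high-pass at 0.1 Hz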
@verbose
def notch_filter(self, freqs, picks=None, filter_length='10s',
notch_widths=None, trans_bandwidth=1.0, n_jobs=1,
method='fft', iir_params=None,
mt_bandwidth=None, p_value=0.05, verbose=None):
"""Notch filter a subset of channels.
Applies a zero-phase notch filter to the channels selected by
"picks". The data of the Raw object is modified inplace.
The Raw object has to be constructed using preload=True (or string).
Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
additional time points need to be temporarily stored in memory.
Parameters
----------
freqs : float | array of float | None
Specific frequencies to filter out from data, e.g.,
np.arange(60, 241, 60) in the US or np.arange(50, 251, 50) in
Europe. None can only be used with the mode 'spectrum_fit',
where an F test is used to find sinusoidal components.
picks : array-like of int | None
Indices of channels to filter. If None only the data (MEG/EEG)
channels will be filtered.
filter_length : str (Default: '10s') | int | None
Length of the filter to use. If None or "len(x) < filter_length",
the filter length used is len(x). Otherwise, if int, overlap-add
filtering with a filter of the specified length (in samples) is
used (faster for long signals). If str, a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
to the shortest power-of-two length at least that duration.
Not used for 'iir' filters.
notch_widths : float | array of float | None
Width of each stop band (centred at each freq in freqs) in Hz.
If None, freqs / 200 is used.
trans_bandwidth : float
Width of the transition band in Hz.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly, CUDA is initialized, and method='fft'.
method : str
'fft' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt). 'spectrum_fit' will
use multi-taper estimation of sinusoidal components.
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
mt_bandwidth : float | None
The bandwidth of the multitaper windowing function in Hz.
Only used in 'spectrum_fit' mode.
p_value : float
p-value to use in F-test thresholding to determine significant
sinusoidal components to remove when method='spectrum_fit' and
freqs=None. Note that this will be Bonferroni corrected for the
number of frequencies, so large p-values may be justified.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
For details, see mne.filter.notch_filter.
"""
if verbose is None:
verbose = self.verbose
fs = float(self.info['sfreq'])
if picks is None:
if 'ICA ' in ','.join(self.ch_names):
pick_parameters = dict(misc=True)
else:
pick_parameters = dict(meg=True, eeg=True)
picks = pick_types(self.info, exclude=[], **pick_parameters)
# let's be safe.
if len(picks) < 1:
raise RuntimeError('Could not find any valid channels for '
'your Raw object. Please contact the '
'MNE-Python developers.')
if not self.preload:
raise RuntimeError('Raw data needs to be preloaded to filter. Use '
'preload=True (or string) in the constructor.')
self._data = notch_filter(self._data, fs, freqs,
filter_length=filter_length,
notch_widths=notch_widths,
trans_bandwidth=trans_bandwidth,
method=method, iir_params=iir_params,
mt_bandwidth=mt_bandwidth, p_value=p_value,
picks=picks, n_jobs=n_jobs, copy=False)
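# Usage sketch (illustrative only, not part of mne/io/base.py): remove power
# line interference and its harmonics from a hypothetical preloaded Raw
# instance `raw` recorded in a 50 Hz country, as in the docstring above.
#
# raw.notch_filter(np.arange(50, 251, 50))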
@verbose
def resample(self, sfreq, npad=100, window='boxcar', stim_picks=None,
n_jobs=1, events=None, copy=False, verbose=None):
"""Resample data channels.
Resamples all channels.
The Raw object has to be constructed using preload=True (or string).
.. warning:: The intended purpose of this function is primarily to
speed up computations (e.g., projection calculation) when
precise timing of events is not required, as downsampling
raw data effectively jitters trigger timings. It is
generally recommended not to epoch downsampled data, but
instead to epoch first and then downsample, since epoching
data that has already been downsampled jitters trigger timings.
See here for an example:
https://gist.github.com/Eric89GXL/01642cb3789992fbca59
If resampling the continuous data is desired, it is
recommended to construct events using the original data.
The event onsets can be jointly resampled with the raw
data using the 'events' parameter.
Parameters
----------
sfreq : float
New sample rate to use.
npad : int
Amount to pad the start and end of the data.
window : string or tuple
Window to use in resampling. See scipy.signal.resample.
stim_picks : array of int | None
Stim channels. These channels are simply subsampled or
supersampled (without applying any filtering). This reduces
resampling artifacts in stim channels, but may lead to missing
triggers. If None, stim channels are automatically chosen using
mne.pick_types(raw.info, meg=False, stim=True, exclude=[]).
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly and CUDA is initialized.
events : 2D array, shape (n_events, 3) | None
An optional event matrix. When specified, the onsets of the events
are resampled jointly with the data.
copy : bool
Whether to operate on a copy of the data (True) or modify data
in-place (False). Defaults to False.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
raw : instance of Raw
The resampled version of the raw object.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
"""
if not self.preload:
raise RuntimeError('Can only resample preloaded data')
inst = self.copy() if copy else self
# When no event object is supplied, some basic detection of dropped
# events is performed to generate a warning. Finding events can fail
# for a variety of reasons, e.g. if no stim channel is present or it is
# corrupted. This should not stop the resampling from working. The
# warning should simply not be generated in this case.
if events is None:
try:
original_events = find_events(inst)
except:
pass
sfreq = float(sfreq)
o_sfreq = float(inst.info['sfreq'])
offsets = np.concatenate(([0], np.cumsum(inst._raw_lengths)))
new_data = list()
ratio = sfreq / o_sfreq
# set up stim channel processing
if stim_picks is None:
stim_picks = pick_types(inst.info, meg=False, ref_meg=False,
stim=True, exclude=[])
stim_picks = np.asanyarray(stim_picks)
for ri in range(len(inst._raw_lengths)):
data_chunk = inst._data[:, offsets[ri]:offsets[ri + 1]]
new_data.append(resample(data_chunk, sfreq, o_sfreq, npad,
n_jobs=n_jobs))
new_ntimes = new_data[ri].shape[1]
# In empirical testing, it was faster to resample all channels
# (above) and then replace the stim channels than it was to only
# resample the proper subset of channels and then use np.insert()
# to restore the stims.
if len(stim_picks) > 0:
stim_resampled = _resample_stim_channels(
data_chunk[stim_picks], new_data[ri].shape[1],
data_chunk.shape[1])
new_data[ri][stim_picks] = stim_resampled
inst._first_samps[ri] = int(inst._first_samps[ri] * ratio)
inst._last_samps[ri] = inst._first_samps[ri] + new_ntimes - 1
inst._raw_lengths[ri] = new_ntimes
inst._data = np.concatenate(new_data, axis=1)
inst.info['sfreq'] = sfreq
inst._update_times()
# See the comment above why we ignore all errors here.
if events is None:
try:
# Did we lose events?
resampled_events = find_events(inst)
if len(resampled_events) != len(original_events):
warnings.warn(
'Resampling of the stim channels caused event '
'information to become unreliable. Consider finding '
'events on the original data and passing the event '
'matrix as a parameter.'
)
except:
pass
return inst
else:
if copy:
events = events.copy()
events[:, 0] = np.minimum(
np.round(events[:, 0] * ratio).astype(int),
inst._data.shape[1]
)
return inst, events
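# Usage sketch (illustrative only, not part of mne/io/base.py): resample a
# hypothetical preloaded Raw instance `raw` to 100 Hz while keeping previously
# found events aligned with the new sampling grid.
#
# events = find_events(raw)
# raw, events = raw.resample(100., events=events)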
def crop(self, tmin=0.0, tmax=None, copy=True):
"""Crop raw data file.
Limit the data from the raw file to go between specific times. Note
that the new tmin is assumed to be t=0 for all subsequently called
functions (e.g., time_as_index, or Epochs). New first_samp and
last_samp are set accordingly. And data are modified in-place when
called with copy=False.
Parameters
----------
tmin : float
New start time in seconds (must be >= 0).
tmax : float | None
New end time in seconds of the data (cannot exceed data duration).
copy : bool
If False Raw is cropped in place.
Returns
-------
raw : instance of Raw
The cropped raw object.
"""
raw = self.copy() if copy is True else self
max_time = (raw.n_times - 1) / raw.info['sfreq']
if tmax is None:
tmax = max_time
if tmin > tmax:
raise ValueError('tmin must be less than tmax')
if tmin < 0.0:
raise ValueError('tmin must be >= 0')
elif tmax > max_time:
raise ValueError('tmax must be less than or equal to the max raw '
'time (%0.4f sec)' % max_time)
smin, smax = np.where(_time_mask(self.times, tmin, tmax))[0][[0, -1]]
cumul_lens = np.concatenate(([0], np.array(raw._raw_lengths,
dtype='int')))
cumul_lens = np.cumsum(cumul_lens)
keepers = np.logical_and(np.less(smin, cumul_lens[1:]),
np.greater_equal(smax, cumul_lens[:-1]))
keepers = np.where(keepers)[0]
raw._first_samps = np.atleast_1d(raw._first_samps[keepers])
# Adjust first_samp of first used file!
raw._first_samps[0] += smin - cumul_lens[keepers[0]]
raw._last_samps = np.atleast_1d(raw._last_samps[keepers])
raw._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax
raw._raw_extras = [r for ri, r in enumerate(raw._raw_extras)
if ri in keepers]
raw._filenames = [r for ri, r in enumerate(raw._filenames)
if ri in keepers]
if raw.preload:
# slice and copy to avoid the reference to large array
raw._data = raw._data[:, smin:smax + 1].copy()
raw._update_times()
return raw
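# Usage sketch (illustrative only, not part of mne/io/base.py; assumes the
# hypothetical Raw instance `raw` is at least 60 s long): keep only the first
# minute of data; with copy=False the object is cropped in place.
#
# raw_short = raw.crop(tmin=0., tmax=60.)
# raw.crop(tmin=0., tmax=60., copy=False)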
@verbose
def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=10,
drop_small_buffer=False, proj=False, fmt='single',
overwrite=False, split_size='2GB', verbose=None):
"""Save raw data to file
Parameters
----------
fname : string
File name of the new dataset. This has to be a new filename
unless data have been preloaded. Filenames should end with
raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif
or raw_tsss.fif.gz.
picks : array-like of int | None
Indices of channels to include. If None all channels are kept.
tmin : float | None
Time in seconds of first sample to save. If None first sample
is used.
tmax : float | None
Time in seconds of last sample to save. If None last sample
is used.
buffer_size_sec : float | None
Size of data chunks in seconds. If None, the buffer size of
the original file is used.
drop_small_buffer : bool
Drop or not the last buffer. It is required by maxfilter (SSS)
that only accepts raw files with buffers of the same size.
proj : bool
If True the data is saved with the projections applied (active).
Note: If apply_proj() was used to apply the projections,
the projections will be active even if proj is False.
fmt : str
Format to use to save raw data. Valid options are 'double',
'single', 'int', and 'short' for 64- or 32-bit float, or 32- or
16-bit integers, respectively. It is **strongly** recommended to
use 'single', as this is backward-compatible, and is standard for
maintaining precision. Note that using 'short' or 'int' may result
in loss of precision, complex data cannot be saved as 'short',
and neither complex data types nor real data stored as 'double'
can be loaded with the MNE command-line tools. See raw.orig_format
to determine the format the original data were stored in.
overwrite : bool
If True, the destination file (if it exists) will be overwritten.
If False (default), an error will be raised if the file exists.
split_size : string | int
Large raw files are automatically split into multiple pieces. This
parameter specifies the maximum size of each piece. If the
parameter is an integer, it specifies the size in Bytes. It is
also possible to pass a human-readable string, e.g., 100MB.
Note: Due to FIFF file limitations, the maximum split size is 2GB.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
If Raw is a concatenation of several raw files, **be warned** that
only the measurement information from the first raw file is stored.
This likely means that certain operations with external tools may not
work properly on a saved concatenated file (e.g., probably some
or all forms of SSS). It is recommended not to concatenate and
then save raw files for this reason.
"""
check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif',
'raw.fif.gz', 'raw_sss.fif.gz',
'raw_tsss.fif.gz'))
split_size = _get_split_size(split_size)
fname = op.realpath(fname)
if not self.preload and fname in self._filenames:
raise ValueError('You cannot save data to the same file.'
' Please use a different filename.')
if self.preload:
if np.iscomplexobj(self._data):
warnings.warn('Saving raw file with complex data. Loading '
'with command-line MNE tools will not work.')
type_dict = dict(short=FIFF.FIFFT_DAU_PACK16,
int=FIFF.FIFFT_INT,
single=FIFF.FIFFT_FLOAT,
double=FIFF.FIFFT_DOUBLE)
if fmt not in type_dict.keys():
raise ValueError('fmt must be "short", "int", "single", '
'or "double"')
reset_dict = dict(short=False, int=False, single=True, double=True)
reset_range = reset_dict[fmt]
data_type = type_dict[fmt]
data_test = self[0, 0][0]
if fmt == 'short' and np.iscomplexobj(data_test):
raise ValueError('Complex data must be saved as "single" or '
'"double", not "short"')
# check for file existence
_check_fname(fname, overwrite)
if proj:
info = copy.deepcopy(self.info)
projector, info = setup_proj(info)
activate_proj(info['projs'], copy=False)
else:
info = self.info
projector = None
# set the correct compensation grade and make inverse compensator
inv_comp = None
if self.comp is not None:
inv_comp = linalg.inv(self.comp)
set_current_comp(info, self._orig_comp_grade)
#
# Set up the reading parameters
#
# Convert to samples
start = int(np.floor(tmin * self.info['sfreq']))
if tmax is None:
stop = self.last_samp + 1 - self.first_samp
else:
stop = int(np.floor(tmax * self.info['sfreq']))
buffer_size = self._get_buffer_size(buffer_size_sec)
# write the raw file
_write_raw(fname, self, info, picks, fmt, data_type, reset_range,
start, stop, buffer_size, projector, inv_comp,
drop_small_buffer, split_size, 0, None)
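# Usage sketch (illustrative only, not part of mne/io/base.py; the output file
# name is a placeholder): save a hypothetical Raw instance `raw` in the
# recommended single-precision format, overwriting any existing file.
#
# raw.save('subject01_filtered_raw.fif', fmt='single', overwrite=True)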
def plot(self, events=None, duration=10.0, start=0.0, n_channels=20,
bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
event_color='cyan', scalings=None, remove_dc=True, order='type',
show_options=False, title=None, show=True, block=False,
highpass=None, lowpass=None, filtorder=4, clipping=None):
"""Plot raw data
Parameters
----------
events : array | None
Events to show with vertical bars.
duration : float
Time window (sec) to plot in a given time.
start : float
Initial time to show (can be changed dynamically once plotted).
n_channels : int
Number of channels to plot at once. Defaults to 20.
bgcolor : color object
Color of the background.
color : dict | color object | None
Color for the data traces. If None, defaults to::
dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
emg='k', ref_meg='steelblue', misc='k', stim='k',
resp='k', chpi='k')
bad_color : color object
Color to make bad channels.
event_color : color object
Color to use for events.
scalings : dict | None
Scale factors for the traces. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,
resp=1, chpi=1e-4)
remove_dc : bool
If True remove DC component when plotting data.
order : 'type' | 'original' | array
Order in which to plot data. 'type' groups by channel type,
'original' plots in the order of ch_names, array gives the
indices to use in plotting.
show_options : bool
If True, a dialog for options related to projection is shown.
title : str | None
The title of the window. If None, either the filename of the
raw object or '<unknown>' will be displayed as the title.
show : bool
Show figures if True
block : bool
Whether to halt program execution until the figure is closed.
Useful for setting bad channels on the fly (click on line).
May not work on all systems / platforms.
highpass : float | None
Highpass to apply when displaying data.
lowpass : float | None
Lowpass to apply when displaying data.
filtorder : int
Filtering order. Note that for efficiency and simplicity,
filtering during plotting uses forward-backward IIR filtering,
so the effective filter order will be twice ``filtorder``.
Filtering the lines for display may also produce some edge
artifacts (at the left and right edges) of the signals
during display. Filtering requires scipy >= 0.10.
clipping : str | None
If None, channels are allowed to exceed their designated bounds in
the plot. If "clamp", then values are clamped to the appropriate
range for display, creating step-like artifacts. If "transparent",
then excessive values are not shown, creating gaps in the traces.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Raw traces.
Notes
-----
The arrow keys (up/down/left/right) can typically be used to navigate
between channels and time ranges, but this depends on the backend
matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
The scaling can be adjusted with - and + (or =) keys. The viewport
dimensions can be adjusted with page up/page down and home/end keys.
Full screen mode can be toggled with the F11 key. To mark or un-mark a
channel as bad, click on the rather flat segments of a channel's time
series. The changes will be reflected immediately in the raw object's
``raw.info['bads']`` entry.
"""
return plot_raw(self, events, duration, start, n_channels, bgcolor,
color, bad_color, event_color, scalings, remove_dc,
order, show_options, title, show, block, highpass,
lowpass, filtorder, clipping)
@verbose
def plot_psd(self, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
proj=False, n_fft=2048, picks=None, ax=None,
color='black', area_mode='std', area_alpha=0.33,
n_overlap=0, dB=True, show=True, n_jobs=1, verbose=None):
"""Plot the power spectral density across channels
Parameters
----------
tmin : float
Start time for calculations.
tmax : float
End time for calculations.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
n_fft : int
Number of points to use in Welch FFT calculations.
picks : array-like of int | None
List of channels to use. Cannot be None if `ax` is supplied. If
both `picks` and `ax` are None, separate subplots will be created
for each standard channel type (`mag`, `grad`, and `eeg`).
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
How to plot area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels)
will be plotted. Bad channels will be excluded from these
calculations. If None, no area will be plotted.
area_alpha : float
Alpha for the area.
n_overlap : int
The number of points of overlap between blocks. The default value
is 0 (no overlap).
dB : bool
If True, transform data to decibels.
show : bool
Call pyplot.show() at the end.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure with frequency spectra of the data channels.
"""
return plot_raw_psd(self, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
proj=proj, n_fft=n_fft, picks=picks, ax=ax,
color=color, area_mode=area_mode,
area_alpha=area_alpha, n_overlap=n_overlap,
dB=dB, show=show, n_jobs=n_jobs)
def plot_psd_topo(self, tmin=0., tmax=None, fmin=0, fmax=100, proj=False,
n_fft=2048, n_overlap=0, layout=None, color='w',
fig_facecolor='k', axis_facecolor='k', dB=True,
show=True, n_jobs=1, verbose=None):
"""Function for plotting channel wise frequency spectra as topography.
Parameters
----------
tmin : float
Start time for calculations. Defaults to zero.
tmax : float | None
End time for calculations. If None (default), the end of data is
used.
fmin : float
Start frequency to consider. Defaults to zero.
fmax : float
End frequency to consider. Defaults to 100.
proj : bool
Apply projection. Defaults to False.
n_fft : int
Number of points to use in Welch FFT calculations. Defaults to
2048.
n_overlap : int
The number of points of overlap between blocks. Defaults to 0
(no overlap).
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If None (default), the correct
layout is inferred from the data.
color : str | tuple
A matplotlib-compatible color to use for the curves. Defaults to
white.
fig_facecolor : str | tuple
A matplotlib-compatible color to use for the figure background.
Defaults to black.
axis_facecolor : str | tuple
A matplotlib-compatible color to use for the axis background.
Defaults to black.
dB : bool
If True, transform data to decibels. Defaults to True.
show : bool
Show figure if True. Defaults to True.
n_jobs : int
Number of jobs to run in parallel. Defaults to 1.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
return plot_raw_psd_topo(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, proj=proj, n_fft=n_fft,
n_overlap=n_overlap, layout=layout,
color=color, fig_facecolor=fig_facecolor,
axis_facecolor=axis_facecolor, dB=dB,
show=show, n_jobs=n_jobs, verbose=verbose)
def time_as_index(self, times, use_first_samp=False, use_rounding=False):
"""Convert time to indices
Parameters
----------
times : list-like | float | int
List of numbers or a number representing points in time.
use_first_samp : boolean
If True, time is treated as relative to the session onset, else
as relative to the recording onset.
use_rounding : boolean
If True, use rounding (instead of truncation) when converting
times to indices. This can help avoid non-unique indices.
Returns
-------
index : ndarray
Indices corresponding to the times supplied.
"""
return _time_as_index(times, self.info['sfreq'], self.first_samp,
use_first_samp, use_rounding=use_rounding)
def index_as_time(self, index, use_first_samp=False):
"""Convert indices to time
Parameters
----------
index : list-like | int
List of ints or int representing points in time.
use_first_samp : boolean
If True, the time returned is relative to the session onset, else
relative to the recording onset.
Returns
-------
times : ndarray
Times corresponding to the index supplied.
"""
return _index_as_time(index, self.info['sfreq'], self.first_samp,
use_first_samp)
def estimate_rank(self, tstart=0.0, tstop=30.0, tol=1e-4,
return_singular=False, picks=None, scalings='norm'):
"""Estimate rank of the raw data
This function is meant to provide a reasonable estimate of the rank.
The true rank of the data depends on many factors, so use at your
own risk.
Parameters
----------
tstart : float
Start time to use for rank estimation. Default is 0.0.
tstop : float | None
End time to use for rank estimation. Default is 30.0.
If None, the end time of the raw file is used.
tol : float
Tolerance for singular values to consider non-zero in
calculating the rank. The singular values are calculated
in this method such that independent data are expected to
have singular value around one.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
picks : array_like of int, shape (n_selected_channels,)
The channels to be considered for rank estimation.
If None (default) meg and eeg channels are included.
scalings : dict | 'norm'
To achieve reliable rank estimation on multiple sensors,
sensors have to be rescaled. This parameter controls the
rescaling. If dict, it will update the
following dict of defaults:
dict(mag=1e11, grad=1e9, eeg=1e5)
If 'norm' data will be scaled by internally computed
channel-wise norms.
Defaults to 'norm'.
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
Notes
-----
If data are not pre-loaded, the appropriate data will be loaded
by this function (can be memory intensive).
Projectors are not taken into account unless they have been applied
to the data using apply_proj(), since it is not always possible
to tell whether or not projectors have been applied previously.
Bad channels will be excluded from calculations.
"""
from ..cov import _estimate_rank_meeg_signals
start = max(0, self.time_as_index(tstart)[0])
if tstop is None:
stop = self.n_times - 1
else:
stop = min(self.n_times - 1, self.time_as_index(tstop)[0])
tslice = slice(start, stop + 1)
if picks is None:
picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
# ensure we don't get a view of data
if len(picks) == 1:
return 1.0, 1.0
# this should already be a copy, so we can overwrite it
data = self[picks, tslice][0]
out = _estimate_rank_meeg_signals(
data, pick_info(self.info, picks),
scalings=scalings, tol=tol, return_singular=return_singular,
copy=False)
return out
@property
def ch_names(self):
"""Channel names"""
return self.info['ch_names']
@property
def times(self):
"""Time points"""
return self._times
@property
def n_times(self):
"""Number of time points"""
return self.last_samp - self.first_samp + 1
def __len__(self):
return self.n_times
def load_bad_channels(self, bad_file=None, force=False):
"""
Mark channels as bad from a text file, in the style
(mostly) of the C function mne_mark_bad_channels
Parameters
----------
bad_file : string
File name of the text file containing bad channels
If bad_file = None, bad channels are cleared, but this
is more easily done directly as raw.info['bads'] = [].
force : boolean
Whether or not to force bad channel marking (of those
that exist) if channels are not found, instead of
raising an error.
"""
if bad_file is not None:
# Check to make sure bad channels are there
names = frozenset(self.info['ch_names'])
with open(bad_file) as fid:
bad_names = [l for l in fid.read().splitlines() if l]
names_there = [ci for ci in bad_names if ci in names]
count_diff = len(bad_names) - len(names_there)
if count_diff > 0:
if not force:
raise ValueError('Bad channels from:\n%s\n not found '
'in:\n%s' % (bad_file,
self._filenames[0]))
else:
warnings.warn('%d bad channels from:\n%s\nnot found '
'in:\n%s' % (count_diff, bad_file,
self._filenames[0]))
self.info['bads'] = names_there
else:
self.info['bads'] = []
def append(self, raws, preload=None):
"""Concatenate raw instances as if they were continuous
Parameters
----------
raws : list, or Raw instance
list of Raw instances to concatenate to the current instance
(in order), or a single raw instance to concatenate.
preload : bool, str, or None (default None)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory). If preload is
None, preload=True or False is inferred using the preload status
of the raw files passed in.
"""
from .fiff.raw import RawFIF
from .kit.kit import RawKIT
from .edf.edf import RawEDF
if not isinstance(raws, list):
raws = [raws]
# make sure the raws are compatible
all_raws = [self]
all_raws += raws
_check_raw_compatibility(all_raws)
# deal with preloading data first (while files are separate)
all_preloaded = self.preload and all(r.preload for r in raws)
if preload is None:
if all_preloaded:
preload = True
else:
preload = False
if not preload and not isinstance(self, (RawFIF, RawKIT, RawEDF)):
raise RuntimeError('preload must be True to concatenate '
'files unless they are FIF, KIT, or EDF')
if preload is False:
if self.preload:
self._data = None
self.preload = False
else:
# do the concatenation ourselves since preload might be a string
nchan = self.info['nchan']
c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)])
nsamp = c_ns[-1]
if not self.preload:
this_data = self._read_segment()
else:
this_data = self._data
# allocate the buffer
if isinstance(preload, string_types):
_data = np.memmap(preload, mode='w+', dtype=this_data.dtype,
shape=(nchan, nsamp))
else:
_data = np.empty((nchan, nsamp), dtype=this_data.dtype)
_data[:, 0:c_ns[0]] = this_data
for ri in range(len(raws)):
if not raws[ri].preload:
# read the data directly into the buffer
data_buffer = _data[:, c_ns[ri]:c_ns[ri + 1]]
raws[ri]._read_segment(data_buffer=data_buffer)
else:
_data[:, c_ns[ri]:c_ns[ri + 1]] = raws[ri]._data
self._data = _data
self.preload = True
# now combine information from each raw file to construct new self
for r in raws:
self._first_samps = np.r_[self._first_samps, r._first_samps]
self._last_samps = np.r_[self._last_samps, r._last_samps]
self._raw_extras += r._raw_extras
self._filenames += r._filenames
self._update_times()
if not (len(self._first_samps) == len(self._last_samps) ==
len(self._raw_extras) == len(self._filenames)):
raise RuntimeError('Append error') # should never happen
def close(self):
"""Clean up the object.
Does nothing for objects that close their file descriptors.
Things like RawFIF will override this method.
"""
pass
def copy(self):
""" Return copy of Raw instance
"""
return deepcopy(self)
def __repr__(self):
name = self._filenames[0]
name = 'None' if name is None else op.basename(name)
        s = ', '.join(('%r' % name, "n_channels x n_times : %s x %s"
                       % (len(self.ch_names), self.n_times)))
return "<%s | %s>" % (self.__class__.__name__, s)
def add_events(self, events, stim_channel=None):
"""Add events to stim channel
Parameters
----------
events : ndarray, shape (n_events, 3)
Events to add. The first column specifies the sample number of
each event, the second column is ignored, and the third column
provides the event value. If events already exist in the Raw
instance at the given sample numbers, the event values will be
added together.
stim_channel : str | None
Name of the stim channel to add to. If None, the config variable
'MNE_STIM_CHANNEL' is used. If this is not found, it will default
to 'STI 014'.
Notes
-----
Data must be preloaded in order to add events.
"""
if not self.preload:
raise RuntimeError('cannot add events unless data are preloaded')
events = np.asarray(events)
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError('events must be shape (n_events, 3)')
stim_channel = _get_stim_channel(stim_channel, self.info)
pick = pick_channels(self.ch_names, stim_channel)
if len(pick) == 0:
raise ValueError('Channel %s not found' % stim_channel)
pick = pick[0]
idx = events[:, 0].astype(int)
if np.any(idx < self.first_samp) or np.any(idx > self.last_samp):
raise ValueError('event sample numbers must be between %s and %s'
% (self.first_samp, self.last_samp))
if not all(idx == events[:, 0]):
raise ValueError('event sample numbers must be integers')
self._data[pick, idx - self.first_samp] += events[:, 2]
def _get_buffer_size(self, buffer_size_sec=None):
"""Helper to get the buffer size"""
if buffer_size_sec is None:
if 'buffer_size_sec' in self.info:
buffer_size_sec = self.info['buffer_size_sec']
else:
buffer_size_sec = 10.0
return int(np.ceil(buffer_size_sec * self.info['sfreq']))
def _allocate_data(data, data_buffer, data_shape, dtype):
"""Helper to data in memory or in memmap for preloading"""
if data is None:
# if not already done, allocate array with right type
if isinstance(data_buffer, string_types):
# use a memmap
data = np.memmap(data_buffer, mode='w+',
dtype=dtype, shape=data_shape)
else:
data = np.zeros(data_shape, dtype=dtype)
return data
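# Illustrative sketch (added example, not part of the original module):
# _allocate_data memory-maps a file when ``data_buffer`` is a path string and
# otherwise falls back to a plain in-memory array. The temporary file below is
# made up purely for demonstration purposes.
def _example_allocate_data():
    import os
    import tempfile
    import numpy as np
    shape = (4, 1000)
    in_memory = _allocate_data(None, None, shape, np.float64)
    fname = os.path.join(tempfile.mkdtemp(), 'buffer.dat')
    memmapped = _allocate_data(None, fname, shape, np.float64)
    assert in_memory.shape == memmapped.shape == shape
    return in_memory, memmapped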
def _time_as_index(times, sfreq, first_samp=0, use_first_samp=False,
use_rounding=False):
"""Convert time to indices
Parameters
----------
times : list-like | float | int
List of numbers or a number representing points in time.
sfreq : float | int
Sample frequency.
first_samp : int
Index to use as first time point.
use_first_samp : boolean
If True, time is treated as relative to the session onset, else
as relative to the recording onset.
use_rounding : boolean
If True, use rounding (instead of truncation) when converting times to
        indices. This can help avoid non-unique indices.
Returns
-------
index : ndarray
Indices corresponding to the times supplied.
Notes
-----
np.round will return the nearest even number for values exactly between
two integers.
"""
index = np.atleast_1d(times) * sfreq
index -= (first_samp if use_first_samp else 0)
# Round or truncate time indices
if use_rounding:
return np.round(index).astype(int)
else:
return index.astype(int)
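# Hypothetical usage (added for illustration only): shows how truncation and
# rounding differ when converting times to sample indices at 1000 Hz.
def _example_time_as_index():
    sfreq = 1000.
    times = [0.0015, 0.9999]
    truncated = _time_as_index(times, sfreq)                   # -> [1, 999]
    rounded = _time_as_index(times, sfreq, use_rounding=True)  # -> [2, 1000]
    return truncated, rounded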
def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False):
"""Convert indices to time
Parameters
----------
index : list-like | int
List of ints or int representing points in time.
use_first_samp : boolean
If True, the time returned is relative to the session onset, else
relative to the recording onset.
Returns
-------
times : ndarray
Times corresponding to the index supplied.
"""
times = np.atleast_1d(index) + (first_samp if use_first_samp else 0)
return times / sfreq
class _RawShell():
"""Used for creating a temporary raw object"""
def __init__(self):
self.first_samp = None
self.last_samp = None
self._cals = None
self._rawdir = None
self._projector = None
@property
def n_times(self):
return self.last_samp - self.first_samp + 1
###############################################################################
# Writing
def _write_raw(fname, raw, info, picks, fmt, data_type, reset_range, start,
stop, buffer_size, projector, inv_comp, drop_small_buffer,
split_size, part_idx, prev_fname):
"""Write raw file with splitting
"""
if part_idx > 0:
# insert index in filename
path, base = op.split(fname)
idx = base.find('.')
use_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
base[idx + 1:]))
else:
use_fname = fname
logger.info('Writing %s' % use_fname)
fid, cals = _start_writing_raw(use_fname, info, picks, data_type,
reset_range)
first_samp = raw.first_samp + start
if first_samp != 0:
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first_samp)
# previous file name and id
if part_idx > 0 and prev_fname is not None:
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_PREV_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, prev_fname)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, info['meas_id'])
write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx - 1)
end_block(fid, FIFF.FIFFB_REF)
pos_prev = None
for first in range(start, stop, buffer_size):
last = first + buffer_size
if last >= stop:
last = stop + 1
if picks is None:
data, times = raw[:, first:last]
else:
data, times = raw[picks, first:last]
if projector is not None:
data = np.dot(projector, data)
if ((drop_small_buffer and (first > start) and
(len(times) < buffer_size))):
logger.info('Skipping data chunk due to small buffer ... '
'[done]')
break
logger.info('Writing ...')
if pos_prev is None:
pos_prev = fid.tell()
_write_raw_buffer(fid, data, cals, fmt, inv_comp)
pos = fid.tell()
this_buff_size_bytes = pos - pos_prev
if this_buff_size_bytes > split_size / 2:
raise ValueError('buffer size is too large for the given split'
'size: decrease "buffer_size_sec" or increase'
'"split_size".')
if pos > split_size:
            logger.warning('file is larger than "split_size"')
# Split files if necessary, leave some space for next file info
if pos >= split_size - this_buff_size_bytes - 2 ** 20:
next_fname, next_idx = _write_raw(
fname, raw, info, picks, fmt,
data_type, reset_range, first + buffer_size, stop, buffer_size,
projector, inv_comp, drop_small_buffer, split_size,
part_idx + 1, use_fname)
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, info['meas_id'])
write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
end_block(fid, FIFF.FIFFB_REF)
break
pos_prev = pos
logger.info('Closing %s [done]' % use_fname)
if info.get('maxshield', False):
end_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)
else:
end_block(fid, FIFF.FIFFB_RAW_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
return use_fname, part_idx
def _start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
reset_range=True):
"""Start write raw data in file
Data will be written in float
Parameters
----------
name : string
Name of the file to create.
info : dict
Measurement info.
sel : array of int, optional
Indices of channels to include. By default all channels are included.
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), 16 (FIFFT_DAU_PACK16), or 3 (FIFFT_INT) for raw data.
reset_range : bool
If True, the info['chs'][k]['range'] parameter will be set to unity.
Returns
-------
fid : file
The file descriptor.
cals : list
calibration factors.
"""
#
# Measurement info
#
info = pick_info(info, sel, copy=True)
#
# Create the file and save the essentials
#
fid = start_file(name)
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
cals = []
for k in range(info['nchan']):
#
# Scan numbers may have been messed up
#
info['chs'][k]['scanno'] = k + 1 # scanno starts at 1 in FIF format
if reset_range is True:
info['chs'][k]['range'] = 1.0
cals.append(info['chs'][k]['cal'] * info['chs'][k]['range'])
write_meas_info(fid, info, data_type=data_type, reset_range=reset_range)
#
# Start the raw data
#
if info.get('maxshield', False):
start_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)
else:
start_block(fid, FIFF.FIFFB_RAW_DATA)
return fid, cals
def _write_raw_buffer(fid, buf, cals, fmt, inv_comp):
"""Write raw buffer
Parameters
----------
fid : file descriptor
an open raw data file.
buf : array
The buffer to write.
cals : array
Calibration factors.
fmt : str
'short', 'int', 'single', or 'double' for 16/32 bit int or 32/64 bit
float for each item. This will be doubled for complex datatypes. Note
that short and int formats cannot be used for complex data.
inv_comp : array | None
The CTF compensation matrix used to revert compensation
change when reading.
"""
if buf.shape[0] != len(cals):
raise ValueError('buffer and calibration sizes do not match')
if fmt not in ['short', 'int', 'single', 'double']:
        raise ValueError('fmt must be "short", "int", "single", or "double"')
if np.isrealobj(buf):
if fmt == 'short':
write_function = write_dau_pack16
elif fmt == 'int':
write_function = write_int
elif fmt == 'single':
write_function = write_float
else:
write_function = write_double
else:
if fmt == 'single':
write_function = write_complex64
elif fmt == 'double':
write_function = write_complex128
else:
raise ValueError('only "single" and "double" supported for '
'writing complex data')
if inv_comp is not None:
buf = np.dot(inv_comp / np.ravel(cals)[:, None], buf)
else:
buf = buf / np.ravel(cals)[:, None]
write_function(fid, FIFF.FIFF_DATA_BUFFER, buf)
def _my_hilbert(x, n_fft=None, envelope=False):
""" Compute Hilbert transform of signals w/ zero padding.
Parameters
----------
x : array, shape (n_times)
The signal to convert
n_fft : int, length > x.shape[-1] | None
How much to pad the signal before Hilbert transform.
Note that signal will then be cut back to original length.
envelope : bool
Whether to compute amplitude of the hilbert transform in order
to return the signal envelope.
Returns
-------
out : array, shape (n_times)
The hilbert transform of the signal, or the envelope.
"""
from scipy.signal import hilbert
n_fft = x.shape[-1] if n_fft is None else n_fft
n_x = x.shape[-1]
out = hilbert(x, N=n_fft)[:n_x]
if envelope is True:
out = np.abs(out)
return out
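# Minimal synthetic demonstration (added example, not in the original file):
# the analytic-signal envelope of an amplitude-modulated sine recovers the
# slow modulation, apart from small effects at the signal edges.
def _example_my_hilbert():
    import numpy as np
    t = np.linspace(0., 1., 1000, endpoint=False)
    modulation = 1. + 0.5 * np.sin(2. * np.pi * 2. * t)
    signal = modulation * np.sin(2. * np.pi * 40. * t)
    envelope = _my_hilbert(signal, envelope=True)
    # away from the edges the envelope should closely track the modulation
    return envelope, modulation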
def _check_raw_compatibility(raw):
"""Check to make sure all instances of Raw
in the input list raw have compatible parameters"""
for ri in range(1, len(raw)):
if not isinstance(raw[ri], type(raw[0])):
raise ValueError('raw[%d] type must match' % ri)
if not raw[ri].info['nchan'] == raw[0].info['nchan']:
raise ValueError('raw[%d][\'info\'][\'nchan\'] must match' % ri)
if not raw[ri].info['bads'] == raw[0].info['bads']:
raise ValueError('raw[%d][\'info\'][\'bads\'] must match' % ri)
if not raw[ri].info['sfreq'] == raw[0].info['sfreq']:
raise ValueError('raw[%d][\'info\'][\'sfreq\'] must match' % ri)
if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']):
raise ValueError('raw[%d][\'info\'][\'ch_names\'] must match' % ri)
if not all(raw[ri]._cals == raw[0]._cals):
raise ValueError('raw[%d]._cals must match' % ri)
if len(raw[0].info['projs']) != len(raw[ri].info['projs']):
raise ValueError('SSP projectors in raw files must be the same')
if not all(_proj_equal(p1, p2) for p1, p2 in
zip(raw[0].info['projs'], raw[ri].info['projs'])):
raise ValueError('SSP projectors in raw files must be the same')
if not all(r.orig_format == raw[0].orig_format for r in raw):
warnings.warn('raw files do not all have the same data format, '
'could result in precision mismatch. Setting '
'raw.orig_format="unknown"')
raw[0].orig_format = 'unknown'
def concatenate_raws(raws, preload=None, events_list=None):
"""Concatenate raw instances as if they were continuous. Note that raws[0]
is modified in-place to achieve the concatenation.
Parameters
----------
raws : list
list of Raw instances to concatenate (in order).
preload : bool, or None
If None, preload status is inferred using the preload status of the
raw files passed in. True or False sets the resulting raw file to
have or not have data preloaded.
events_list : None | list
The events to concatenate. Defaults to None.
Returns
-------
raw : instance of Raw
The result of the concatenation (first Raw instance passed in).
events : ndarray of int, shape (n events, 3)
        The events. Only returned if `events_list` is not None.
"""
if events_list is not None:
if len(events_list) != len(raws):
            raise ValueError('`raws` and `events_list` are required '
'to be of the same length')
first, last = zip(*[(r.first_samp, r.last_samp) for r in raws])
events = concatenate_events(events_list, first, last)
raws[0].append(raws[1:], preload)
if events_list is None:
return raws[0]
else:
return raws[0], events
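# Hypothetical usage (illustration only; the file names below are made up):
#     raw = concatenate_raws([RawFIF('run1_raw.fif'), RawFIF('run2_raw.fif')])
# The first instance is modified in place and returned; pass ``events_list``
# to get the concatenated events array back as well.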
def _check_update_montage(info, montage, path=None, update_ch_names=False):
""" Helper function for eeg readers to add montage"""
if montage is not None:
if not isinstance(montage, (string_types, Montage)):
err = ("Montage must be str, None, or instance of Montage. "
"%s was provided" % type(montage))
raise TypeError(err)
if montage is not None:
if isinstance(montage, string_types):
montage = read_montage(montage, path=path)
_set_montage(info, montage, update_ch_names=update_ch_names)
missing_positions = []
exclude = (FIFF.FIFFV_EOG_CH, FIFF.FIFFV_MISC_CH,
FIFF.FIFFV_STIM_CH)
for ch in info['chs']:
if not ch['kind'] in exclude:
if np.unique(ch['loc']).size == 1:
missing_positions.append(ch['ch_name'])
# raise error if positions are missing
if missing_positions:
err = ("The following positions are missing from the montage "
"definitions: %s. If those channels lack positions "
"because they are EOG channels use the eog parameter."
% str(missing_positions))
raise KeyError(err)
| bsd-3-clause |
harry0519/nsnqt | nsnqtlib/strategies/report.py | 1 | 3607 | # -*- coding:utf-8 -*-
import pandas as pd
import time,datetime
import matplotlib.pyplot as plt
import random
pd.set_option('display.height',1000)
pd.set_option('display.max_rows',500)
pd.set_option('display.max_columns',50)
pd.set_option('display.width',1000)
class report(object):
def __init__(self,df):
'''
        df should follow this format: "index(title is none)","stock","buy_date","sell_date","holddays","profit"
'''
self.df = df
# print (sorted(df["buy_date"])[0])
# print (sorted(df["sell_date"])[-1])
# import sys
# sys.exit(0)
def formatdate(self,s):
try:
t = time.strptime(s, "%Y/%m/%d")
y,m,d = t[0:3]
rst = datetime.datetime(y,m,d).strftime('%Y-%m-%d')
except:
print (s)
rst = s
return rst
def positiongain(self,start="2011-01-01",end="2016-11-18"):
totalmoney = 100
leftmoney = 100
holds = []
datelist = [i.strftime('%Y-%m-%d') for i in pd.date_range(start, end)]
result = {d:[] for d in datelist}
gains = {d:0 for d in datelist}
df = self.df
for i in df.values:
i[2] = self.formatdate(i[2])
i[3] = self.formatdate(i[3])
result[i[2]].append(i)
for date in datelist:
currentholdnum = len(holds)
current_day_could_buy_num = len(result[date])
if current_day_could_buy_num >=1 and currentholdnum < 10:
buymoney = leftmoney/(10-currentholdnum)
if current_day_could_buy_num + currentholdnum <= 10:
leftmoney = leftmoney - buymoney*current_day_could_buy_num
holds.extend([(i,buymoney) for i in result[date]])
else:
leftmoney = 0
holds.extend([(i,buymoney) for i in random.sample(result[date],10-currentholdnum)])
for d in holds[:]:
if d[0][3]>= date :
holds.remove(d)
leftmoney += d[1]*(d[0][5]+1)
totalmoney += d[1]*d[0][5]
gains[date] = totalmoney
newdf = pd.DataFrame(data=[gains[i] for i in datelist], index=datelist,columns=["a",])
newdf["date"] = newdf.index
newdf.plot(x="date", y="a", kind='area')
plt.savefig("positiongain_from_{}_to_{}.png".format(start,end))
plt.show()
def cumulative_graph(self,datafile="",start="2013-03-01",end="2016-11-18"):
date = [i.strftime('%Y-%m-%d') for i in pd.date_range(start, end)]
result = {d:[0,0] for d in date}
df = self.df
for i in df.values:
i[2] = self.formatdate(i[2])
i[3] = self.formatdate(i[3])
result[i[3]][0] += i[5]
result[i[3]][1] += 1
newdf = pd.DataFrame(data=[[result[i][0],result[i][1]] for i in date], index=date,columns=["a","b"])
newdf["data"] = newdf["a"].cumsum()
newdf["buys"] = newdf["b"].cumsum()
newdf["c"] = (newdf["a"]/newdf["b"]).fillna(0)
newdf["d"] = newdf["c"].cumsum().fillna(0)
newdf["date"] = newdf.index
print (newdf)
newdf.plot(x="date", y="d", kind='line')
plt.savefig("test_buys_mean.png")
plt.show()
if __name__ == '__main__':
df = pd.read_csv('macd.csv')
r = report(df)
r.positiongain(start="2011-01-01",end="2016-11-18")
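# Illustrative note (added comment): 'macd.csv' is assumed to hold one closed
# trade per row with the columns described in the class docstring, e.g.
# ,stock,buy_date,sell_date,holddays,profit
# 0,600036,2015/01/05,2015/02/10,36,0.08
# Calling r.cumulative_graph() instead plots the cumulative mean profit per
# sell date over the default 2013-2016 window.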
| bsd-2-clause |
muthujothi/Kaggle-schizophrenia-classification | benchmark_nn.py | 1 | 2346 | import pandas as pd
import numpy as np
from sklearn import linear_model
import cPickle as pickle
from math import sqrt
#from pybrain.datasets.supervised import SupervisedDataSet as SDS
from pybrain.datasets import ClassificationDataSet as CDS
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from pybrain.structure.modules import SigmoidLayer
hidden_size = 100
output_model_file = 'C:/LearningMaterials/Kaggle/Mlsp/mode2.pkl'
#Load the FNC train data as a dataframe
df_1 = pd.read_csv('C:/LearningMaterials/Kaggle/Mlsp/train_FNC.csv')
#Load the SBM train data as a dataframe
df_2 = pd.read_csv('C:/LearningMaterials/Kaggle/Mlsp/train_SBM.csv')
#Leave the first column in both the datasets to load the features as a dataframe
df_train_fnc = df_1.ix[:, 1:]
df_train_sbm = df_2.ix[:,1:]
#Geta numpy (n_samples X n_features) representation for each of the data frame.
np_fnc = df_train_fnc.values
np_sbm = df_train_sbm.values
#column wise stack both the numpy matrices to get a feature matrix X
X = np.hstack((np_fnc,np_sbm))
#shud be 4 X 410
#print X.shape
#Load the labels data
df_3 = pd.read_csv('C:/LearningMaterials/Kaggle/Mlsp/train_labels.csv')
df_train_labels = df_3.ix[:,1]
y = df_train_labels.values
y = y.reshape(-1, 1)
print "Dimensions of input feature vector X "
print X.shape
input_size = X.shape[1]
#Get a linear model from the sklearn
#clf = linear_model.LogisticRegression(C=0.16,penalty='l1', tol=0.001, fit_intercept=True)
#clf.fit(X, y)
#Get a network that trains based on backpropagation method using the training data
ds = CDS( input_size, class_labels=['Healthy','Schizo'] )
ds.setField( 'input', X )
ds.setField( 'target', y)
print len(ds)
# init and train
net = buildNetwork( input_size, hidden_size, 1, bias = True, outclass=SigmoidLayer )#feature vector, hidden layer size,
trainer = BackpropTrainer( net,ds )
trainer.trainUntilConvergence( verbose = True, validationProportion = 0.15, maxEpochs = 1000, continueEpochs = 10 )
pickle.dump( net, open( output_model_file, 'wb' ))
p = net.activateOnDataset( ds )
np.savetxt('nn_sub2.csv', p, delimiter=",", fmt = '%1.4f')
#print net['in']
#print net['hidden0']
#print net['out']
#for pred in p:
#print round(pred[0],4)
# print pred[0]
| mit |
wathen/PhD | MHD/FEniCS/MHD/CG/PicardIter_Direct/DecoupleTest/KappaChange/tests/MD.py | 1 | 12360 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time as t
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import Solver as S
import ExactSol
import P as Precond
import cProfile, pstats, StringIO
m = 2
IterType = 'MD'
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
nn = 2
mm = 4
MUsave = np.zeros((mm*3,1))
MUit = np.zeros((m-1,mm*3))
print MUit[0,0]
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
R = 0.01
jj = -2
for yy in xrange(1,mm+1):
jj +=3
MU =(R*10**(yy))
print "++++++++",MU
# MU[0]= 1e0
for xx in xrange(1,m):
MUsave[jj-1] = MU
print xx
level[xx-1] = xx+3
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
mesh = RectangleMesh(0, 0, 1, 1, nn, nn,'left')
parameters["form_compiler"]["precision"] = 15
parameters["form_compiler"]["quadrature_degree"] = -1
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "CG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity,Pressure,Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1)
bcu = DirichletBC(W.sub(0),u0, boundary)
bcp = DirichletBC(W.sub(1),p0, boundary)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, p, b, r) = TrialFunctions(W)
(v, q, c,s ) = TestFunctions(W)
kappa = 1.0
Mu_m =10
F_NS = -kappa*Laplacian+Advection+gradPres-kappa*NS_Couple
if kappa == 0:
F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
params = [MU,Mu_m,kappa]
F_NS = -kappa*Laplacian+Advection+gradPres-kappa*NS_Couple
F_M = Mu_m*MU*CurlCurl+gradR -kappa*M_Couple
# params = [Mu,Mu_m,kappa]
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,Neumann=Expression(("0","0")),options ="New")
p_k.vector()[:]= p_k.vector().array()+np.abs(np.min(p_k.vector().array()))
# bcu.apply(u_k)
# bcb.apply(b_k)
# bcr.apply(r_k)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType)
print CoupleTerm
parameters['linear_algebra_backend'] = 'PETSc'
RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params)
bcu = DirichletBC(W.sub(0),Expression(("0","0")), boundary)
bcb = DirichletBC(W.sub(2),Expression(("0","0")), boundary)
bcr = DirichletBC(W.sub(3),Expression(("0")), boundary)
bcs = [bcu,bcb,bcr]
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # iteration counter
maxiter = 20 # max no of iterations allowed
SolutionTime = 0
iter = 0
outer = 0
parameters['linear_algebra_backend'] = 'uBLAS'
p = forms.Preconditioner(mesh,W,u_k,b_k,params,IterType)
PP,Pb = assemble_system(p, Lns,bcs)
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
if IterType == "Full" or IterType == "MD":
(pQ) = TrialFunction(Pressure)
(qQ) = TestFunction(Pressure)
print kappa
Q = assemble(inner(pQ,qQ)*dx)
L = assemble(inner(grad(pQ),grad(qQ))*dx)
n = FacetNormal(mesh)
fp = kappa*inner(grad(qQ), grad(pQ))*dx+inner((u_k[0]*grad(pQ)[0]+u_k[1]*grad(pQ)[1]),qQ)*dx + (1/2)*div(u_k)*inner(pQ,qQ)*dx - (1/2)*(u_k[0]*n[0]+u_k[1]*n[1])*inner(pQ,qQ)*ds
L = CP.Assemble(L)
if IterType == "CD":
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
P = CP.Assemble(PP)
u = b.duplicate()
Mits = 0
NSits = 0
InnerTol = []
OuterTol = []
OuterTol = 1e-6
# InnerTol.append(1e-6*((iter)*50))
InnerTol = 1e-6
TotalStart = t.clock()
while eps > tol and iter < maxiter:
iter += 1
if IterType == "CD":
bb = assemble((Lmaxwell + Lns) - RHSform)
for bc in bcs:
bc.apply(bb)
A,b = CP.Assemble(AA,bb)
P = CP.Assemble(PP)
print b
else:
# tic()
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
del AA
F = assemble(fp)
F = CP.Assemble(F)
P = CP.Assemble(PP)
# P = S.ExactPrecond(PP,Q,L,F,FSpaces)
Mass = CP.Assemble(Q)
# print "Assemble time >>>>>>",toc()
# if iter == 1:
uu = b.duplicate()
# else:
# uu = uu
pr = cProfile.Profile()
start = t.clock()
pr.enable()
print InnerTol
print OuterTol
u,it1,it2 = S.solve(A,b,uu,P,[NS_is,M_is],FSpaces,IterType,OuterTol,InnerTol,Mass,L,F)
del A
# print InnerTol[iter-1]
pr.disable()
# time = toc()
time = (t.clock() - start)
s = StringIO.StringIO()
print "Solve time >>>>>>", time
print it1,it2
NSits += it1
Mits +=it2
SolutionTime = SolutionTime +time
# tic()
u, p, b, r, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
u_k.assign(u)
p_k.assign(p)
b_k.assign(b)
r_k.assign(r)
# print "Correction time >>>>>>", toc()
# p_k.vector()[:]= p_k.vector().array()+np.abs(np.min(p_k.vector().array()))
x = Iter.u_prev(u_k,p_k,b_k,r_k)
if eps > 1e2 and iter>10:
iter = 10000000000000
break
# u_k,b_k,epsu,epsb=Direct.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter)
print xx
MUit[xx-1,jj-1] = iter
MUit[xx-1,jj] = (float(NSits)/iter)
MUit[xx-1,jj+1] = (float(Mits)/iter)
TotalTime[xx-1] = t.clock()-TotalStart
SolTime[xx-1] = SolutionTime/iter
ue = u0
pe = p0
be = b0
re = r0
ExactSolution = [ue,pe,be,re]
#errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(x,mesh,FSpaces,ExactSolution,order,dim)
if xx == 1:
l2uorder[xx-1] = 0
else:
l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
import pandas as pd
# print "\n\n Velocity convergence"
# VelocityTitles = ["l","Total DoF","V DoF","Soln Time","V-L2","L2-order","V-H1","H1-order"]
# VelocityValues = np.concatenate((level,Wdim,Velocitydim,SolTime,errL2u,l2uorder,errH1u,H1uorder),axis=1)
# VelocityTable= pd.DataFrame(VelocityValues, columns = VelocityTitles)
# pd.set_option('precision',3)
# VelocityTable = MO.PandasFormat(VelocityTable,"V-L2","%2.4e")
# VelocityTable = MO.PandasFormat(VelocityTable,'V-H1',"%2.4e")
# VelocityTable = MO.PandasFormat(VelocityTable,"H1-order","%1.2f")
# VelocityTable = MO.PandasFormat(VelocityTable,'L2-order',"%1.2f")
# print VelocityTable.to_latex()
# print "\n\n Pressure convergence"
# PressureTitles = ["l","Total DoF","P DoF","Soln Time","P-L2","L2-order"]
# PressureValues = np.concatenate((level,Wdim,Pressuredim,SolTime,errL2p,l2porder),axis=1)
# PressureTable= pd.DataFrame(PressureValues, columns = PressureTitles)
# pd.set_option('precision',3)
# PressureTable = MO.PandasFormat(PressureTable,"P-L2","%2.4e")
# PressureTable = MO.PandasFormat(PressureTable,'L2-order',"%1.2f")
# print PressureTable.to_latex()
# print "\n\n Iteration table"
# if IterType == "Full":
# IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
# else:
# IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
# IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,NSave,Mave),axis=1)
# IterTable= pd.DataFrame(IterValues, columns = IterTitles)
# if IterType == "Full":
# IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
# IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
# else:
# IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
# IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
# print IterTable.to_latex()
# print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol
print MUit
print MUsave
import pandas as pd
LatexTitles = ["l","DoF"]
for x in xrange(1,mm+1):
LatexTitles.extend(["it","it","it"])
pd.set_option('precision',3)
LatexValues = np.concatenate((level,Wdim,MUit), axis=1)
title = np.concatenate((np.array([[0,0]]),MUsave.T),axis=1)
MU = ["0","0"]
for x in xrange(1,mm+1):
MU.extend(["Full","MD","CD"])
LatexValues = np.vstack((title,LatexValues))
LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
name = "Output/"+IterType+"kappatest"
# LatexTable.to_csv(name)
print LatexTable.to_latex()
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# # plot(interpolate(ue,Velocity))
# plot(p_k)
# # pe = interpolate(pe,Pressure)
# # pe.vector()[:] -= np.max(pe.vector().array() )/2
# # plot(interpolate(pe,Pressure))
# plot(b_k)
# # plot(interpolate(be,Magnetic))
# plot(r_k)
# # plot(interpolate(re,Lagrange))
# # # interactive()
# interactive()
| mit |
rabrahm/ceres | fies/fiesutils.py | 1 | 9230 | import matplotlib
matplotlib.use("Agg")
from astropy.io import fits as pyfits
import numpy as np
import scipy
import copy
import glob
import os
import matplotlib.pyplot as plt
import sys
from pylab import *
base = "../"
sys.path.append(base+"utils/GLOBALutils")
import GLOBALutils
def get_thar_offsets(lines_thar, order_dir='wavcals/', pref='order_', suf='.iwdat', delt_or=10, del_width=200.,binning=1):
start_or = int(.5*delt_or)
xcs = []
for ii in range(delt_or,len(lines_thar)-delt_or):
thar_order = lines_thar[ii]
xct = []
for order in range(ii-start_or,ii+start_or):
order_s = str(order)
if (order < 10):
order_s = '0' + order_s
if os.access(order_dir+pref+order_s+suf,os.F_OK):
f = open(order_dir+pref+order_s+suf,'r')
llins = f.readlines()
if True:
pixel_centers_0 = []
for line in llins:
w = line.split()
nlines = int(w[0])
for j in range(nlines):
pixel_centers_0.append(float(w[2*j+1])*1./float(binning))
pixel_centers_0 = np.array(pixel_centers_0).astype('int')
#plot(thar_order)
#plot(pixel_centers_0,thar_order[pixel_centers_0],'ro')
#print order, order_s
#show()
ml = np.array(pixel_centers_0) - 2
mh = np.array(pixel_centers_0) + 2
if len(ml)>0:
xc,offs = GLOBALutils.XCorPix( thar_order, ml, mh, del_width=del_width)
else:
xc = np.zeros(len(offs))
if len(xct) == 0:
xct = xc.copy()
else:
xct = np.vstack((xct,xc))
if len(xcs) == 0:
xcs = xct.copy()
else:
xcs += xct
maxes, maxvels = [],[]
for i in range(xcs.shape[0]):
maxes.append(xcs[i].max())
maxvels.append(offs[np.argmax(xcs[i])])
#plot(offs,xcs[i])
#show()
maxes,maxvels = np.array(maxes),np.array(maxvels)
orders_offset = -start_or + np.argmax(maxes)
rough_shift = maxvels[np.argmax(maxes)]
return orders_offset, rough_shift
def ra_from_sec(ra,time=True):
ra = float(ra)
sign = ' '
if ra < 0:
sign = '-'
ra *= -1
hh = ra/3600.
mm = (hh - int(hh))*60.
ss = (mm - int(mm))*60.
shh = str(int(hh))
smm = str(int(mm))
sss = str(np.around(ss,2))
if hh<10:
shh = '0' + shh
if mm<10:
smm = '0' + smm
if ss<10:
sss = '0' + sss
return sign + shh + ':' + smm + ':' + sss
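# Illustrative example (added, not used by the pipeline): ra_from_sec turns a
# coordinate expressed in seconds (of time for RA, of arc for DEC) into a
# sexagesimal 'HH:MM:SS.ss' string, keeping the sign of negative values.
def _example_ra_from_sec():
    positive = ra_from_sec(41915.2)   # ' 11:38:35.2'
    negative = ra_from_sec(-3725.0)   # '-01:02:05.0'
    return positive, negative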
def FileClassify(diri, log,binning=1,mode='F1', dark_corr=False):
"""
Classifies all files in a directory and writes a night log of science images
"""
# define output lists
sim_sci = []
biases = []
flats = []
ThAr_ref = []
ThAr_ref_dates = []
ThAr_co = []
ThAr_co_dates = []
ThAr_sim = []
ThAr_sim_dates = []
flat_ref_dates = []
bias_ref_dates = []
obnames = []
exptimes = []
darks = []
flats_co = []
flats_co_dates = []
sdarks = []
if dark_corr and os.access(diri+'/darks.txt',os.F_OK):
fd = open(diri+'/darks.txt','r')
ds = fd.readlines()
for dk in ds:
sdarks.append(diri+dk[:-1])
sdarks = np.array(sdarks)
f = open(log,'w')
#Do not consider the images specified in dir+badfiles.txt
bad_files = []
if os.access(diri+'bad_files.txt',os.F_OK):
bf = open(diri+'bad_files.txt')
linesbf = bf.readlines()
for line in linesbf:
bad_files.append(diri+line[:-1])
bf.close()
all_files = glob.glob(diri+"/*fits")
for archivo in all_files:
#print archivo
dump = False
for bf in bad_files:
if archivo == bf:
dump = True
break
isdark=False
for df in sdarks:
if archivo == df:
darks.append(archivo)
isdark=True
if dump == False and isdark == False:
h = pyfits.open(archivo)
print archivo, h[0].header['OBJECT'], h[0].header['EXPTIME']
hd = pyfits.getheader(archivo)
print mode, h[0].header['FIFMSKNM'],
if int(h[0].header['DETXBIN']) == binning and int(h[0].header['DETYBIN']) == binning and (mode in h[0].header['FIFMSKNM']) and h[0].header['IMAGETYP'] != 'COUNTTEST':
print archivo, h[0].header['IMAGETYP'], h[0].header['SHSTAT'], h[0].header['EXPTIME'], h[0].header['OBJECT'], h[0].header['TCSTGT'], int(h[0].header['DETYBIN'])
if h[0].header['IMAGETYP'] == 'BIAS' or 'bias' in h[0].header['OBJECT']:
biases.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
bias_ref_dates.append( mjd )
elif h[0].header['IMAGETYP'] == 'FLAT' or 'flat' in h[0].header['OBJECT']:
flats.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
flat_ref_dates.append( mjd )
if h[0].header['FICARMID'] == 6 and h[0].header['FILMP1'] == 1 and h[0].header['FILMP6']==0:
flats_co.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
flats_co_dates.append( mjd )
else:
flats.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
flat_ref_dates.append( mjd )
sc = pyfits.getdata(archivo)
#plot(sc[1000])
elif h[0].header['IMAGETYP'] == 'WAVE' or h[0].header['IMAGETYP'] == 'WAVE,LAMP':
ThAr_ref.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
ThAr_ref_dates.append( mjd )
elif ((mode=='F3' or mode=='F4') and h[0].header['FICARMID'] == 6 and h[0].header['FILMP4'] == 0 and h[0].header['FILMP7']==1)\
or (mode=='F1' and h[0].header['FICARMID'] == 2 and h[0].header['FILMP4'] == 0 and h[0].header['FILMP7']==1):
ThAr_ref.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
ThAr_ref_dates.append( mjd )
elif h[0].header['FICARMID'] == 6 and h[0].header['FILMP4'] == 1 and h[0].header['FILMP7']==0:
ThAr_co.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
ThAr_co_dates.append( mjd )
elif h[0].header['FICARMID'] == 6 and h[0].header['FILMP4'] == 1 and h[0].header['FILMP7']==1:
ThAr_sim.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
ThAr_sim_dates.append( mjd )
elif (mode=='F3' and h[0].header['FICARMID'] == 2) or (mode == 'F1' and h[0].header['FICARMID'] == 5)\
or (mode=='F4' and (h[0].header['FICARMID'] == 5 or h[0].header['FICARMID'] == 4 or h[0].header['FICARMID'] == 20 ) ):
sim_sci.append(archivo)
obname = h[0].header['OBJECT']
obnames.append( obname )
ra = ra_from_sec(h[0].header['RA']*3600.*24./360.)
delta = ra_from_sec(h[0].header['DEC']*3600.)
airmass= float(h[0].header['AIRMASS'])
texp = float(h[0].header['EXPTIME'])
date = h[0].header['DATE-OBS']
hour = date[11:]
date = date[:10]
exptimes.append( texp )
if h[0].header['FILMP4'] == 1:
simult = 'SIMULT'
else:
simult = 'NO_SIMULT'
line = "%-15s %10s %10s %8.2f %4.2f %8s %11s %s %s\n" % (obname, ra, delta, texp, airmass, date, hour, archivo, simult)
f.write(line)
#show()
flat_ref_dates = np.array(flat_ref_dates)
flats = np.array(flats)
IS = np.argsort(flat_ref_dates)
flat_ref_dates = flat_ref_dates[IS]
flats = flats[IS]
#for i in range(len(flats)):
# print 'flat',flats[i], flat_ref_dates[i]
bias_ref_dates = np.array(bias_ref_dates)
biases = np.array(biases)
IS = np.argsort(bias_ref_dates)
bias_ref_dates = bias_ref_dates[IS]
biases = biases[IS]
#for i in range(len(biases)):
# print 'bias',biases[i], bias_ref_dates[i]
f.close()
return biases, np.array(flats), np.array(ThAr_ref), sim_sci, np.array(ThAr_ref_dates), obnames, exptimes, np.array(darks), np.array(flats_co), np.array(flats_co_dates),np.array(ThAr_sim), np.array(ThAr_sim_dates),np.array(ThAr_co), np.array(ThAr_co_dates)
def get_darktimes(darks):
times = []
for dark in darks:
hd = pyfits.getheader(dark)
times.append(hd['EXPTIME'])
return np.unique(np.sort(np.array(times))), np.array(times)
def mjd_fromheader2(h):
"""
return modified Julian date from header
"""
datetu = h[0].header['DATE-OBS']
mjd0,mjd,i = GLOBALutils.iau_cal2jd(int(datetu[:4]),int(datetu[5:7]),int(datetu[8:10]))
ho = int(datetu[11:13])
mi = int(datetu[14:16])
se = float(datetu[17:])
ut = float(ho) + float(mi)/60.0 + float(se)/3600.0
mjd_start = mjd + ut/24.0
secinday = 24*3600.0
fraction = 0.5
texp = h[0].header['EXPTIME'] #sec
mjd = mjd_start + (fraction * texp) / secinday
return mjd, mjd0
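# Hedged example (added for illustration; the header values are invented):
# the mid-exposure MJD is the MJD at the start of the exposure plus half of
# EXPTIME expressed in days.
def _example_mjd_midexposure():
    # exposure starting 2015-01-01 00:00:00 UTC and lasting 1200 s
    mjd0, mjd_start, flag = GLOBALutils.iau_cal2jd(2015, 1, 1)
    texp = 1200.0
    mjd_mid = mjd_start + 0.5 * texp / (24.0 * 3600.0)
    return mjd_mid, mjd0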
def get_RONGAIN(hd):
return hd['RDNOISE'], hd['GAIN']
def MedianCombine(ImgList, zero='none', binning=1, oii=100, off=2148):
"""
Median combine a list of images
"""
n = len(ImgList)
if n==0:
raise ValueError("empty list provided!")
h = pyfits.open(ImgList[0])
d1 = h[1].data
h1 = h[1].header
d1 = OverscanTrim(d1, binning=binning,ii=oii, ff=off)
if zero != 'none':
z = pyfits.open(zero)[0]
d1 -= z.data
factor = 1.25
if (n < 3):
factor = 1
ron1,gain1 = get_RONGAIN(h[1].header)
ron1 = factor * ron1 / np.sqrt(n)
if n>1:
for i in range(n-1):
td = pyfits.open(ImgList[i+1])
if zero == 'none':
d1 = np.dstack((d1,OverscanTrim(td[1].data, binning=binning, ii=oii, ff=off)))
else:
d1 = np.dstack((d1,OverscanTrim(td[1].data, binning=binning, ii=oii, ff=off)-z.data))
d1 = np.median(d1,axis=2)
return d1, ron1, gain1
def OverscanTrim(dat,binning=1,ii=100,ff=2148):
"""
    Overscan correct and trim a FIES image
"""
#ff = 2098
#ii = 50
ff = int(np.around(ff/binning))
ii = int(np.around(ii/binning))
os = dat[:,ff:]
s = np.median(os)
newdata = dat[:,ii:ff].copy() - s
return newdata
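# Small synthetic check (illustration only): OverscanTrim subtracts the median
# of the overscan strip (columns >= ff) and keeps only the science columns
# [ii:ff]. The fake frame below stands in for a real FIES image.
def _example_overscan_trim():
    fake = np.ones((10, 2200)) * 500.   # flat bias level of 500 ADU
    fake[:, 100:2148] += 1000.          # "science" region with extra signal
    trimmed = OverscanTrim(fake, binning=1, ii=100, ff=2148)
    # bias level removed, science region left at ~1000 ADU, shape (10, 2048)
    return trimmed.shape, np.median(trimmed)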
| mit |
olrosales/PyFME | examples/example_004_stationary_horizontal_turn.py | 2 | 4183 | # -*- coding: utf-8 -*-
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Example 004
-------
Cessna 172, ISA1976 integrated with Flat Earth (Euler angles).
Example with trimmed aircraft: stationary, horizontal turn.
The purpose of this example is to check if during the aircraft's
evolution it maintains the initially trimmed flight condition.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pyfme.aircrafts import Cessna172
from pyfme.environment.environment import Environment
from pyfme.environment.atmosphere import ISA1976
from pyfme.environment.gravity import VerticalConstant
from pyfme.environment.wind import NoWind
from pyfme.models.systems import EulerFlatEarth
from pyfme.simulator import BatchSimulation
from pyfme.utils.trimmer import steady_state_flight_trimmer
aircraft = Cessna172()
atmosphere = ISA1976()
gravity = VerticalConstant()
wind = NoWind()
environment = Environment(atmosphere, gravity, wind)
# Initial conditions.
TAS = 45 # m/s
h0 = 3000 # m
psi0 = 1.0 # rad
x0, y0 = 0, 0 # m
turn_rate = 0.005 # rad/s
gamma0 = 0.0 # rad
system = EulerFlatEarth(lat=0, lon=0, h=h0, psi=psi0, x_earth=x0, y_earth=y0)
not_trimmed_controls = {'delta_elevator': 0.05,
'delta_aileron': 0.01 * np.sign(turn_rate),
'delta_rudder': 0.01 * np.sign(turn_rate),
'delta_t': 0.5}
controls2trim = ['delta_elevator', 'delta_aileron', 'delta_rudder', 'delta_t']
trimmed_ac, trimmed_sys, trimmed_env, results = steady_state_flight_trimmer(
aircraft, system, environment, TAS=TAS, controls_0=not_trimmed_controls,
controls2trim=controls2trim, gamma=gamma0, turn_rate=turn_rate, verbose=1)
print()
print('delta_elev = ', "%8.4f" % np.rad2deg(results['delta_elevator']), 'deg')
print('delta_aile = ', "%8.4f" % np.rad2deg(results['delta_aileron']), 'deg')
print('delta_rud = ', "%8.4f" % np.rad2deg(results['delta_rudder']), 'deg')
print('delta_t = ', "%8.4f" % results['delta_t'], '%', '\n')
print('alpha = ', "%8.4f" % np.rad2deg(results['alpha']), 'deg')
print('beta = ', "%8.4f" % np.rad2deg(results['beta']), 'deg', '\n')
print('u = ', "%8.4f" % results['u'], 'm/s')
print('v = ', "%8.4f" % results['v'], 'm/s')
print('w = ', "%8.4f" % results['w'], 'm/s', '\n')
print('psi = ', "%8.4f" % np.rad2deg(psi0), 'deg')
print('theta = ', "%8.4f" % np.rad2deg(results['theta']), 'deg')
print('phi = ', "%8.4f" % np.rad2deg(results['phi']), 'deg', '\n')
print('p =', "%8.4f" % results['p'], 'rad/s')
print('q =', "%8.4f" % results['q'], 'rad/s')
print('r =', "%8.4f" % results['r'], 'rad/s')
my_simulation = BatchSimulation(trimmed_ac, trimmed_sys, trimmed_env)
tfin = 100 # seconds
N = tfin * 100 + 1
time = np.linspace(0, tfin, N)
initial_controls = trimmed_ac.controls
controls = {}
for control_name, control_value in initial_controls.items():
controls[control_name] = np.ones_like(time) * control_value
my_simulation.set_controls(time, controls)
par_list = ['x_earth', 'y_earth', 'height',
'psi', 'theta', 'phi',
'u', 'v', 'w',
'v_north', 'v_east', 'v_down',
'p', 'q', 'r',
'alpha', 'beta', 'TAS',
'F_xb', 'F_yb', 'F_zb',
'M_xb', 'M_yb', 'M_zb']
my_simulation.set_par_dict(par_list)
my_simulation.run_simulation()
# print(my_simulation.par_dict)
plt.style.use('ggplot')
for ii in range(len(par_list) // 3):
three_params = par_list[3 * ii:3 * ii + 3]
fig, ax = plt.subplots(3, 1, sharex=True)
for jj, par in enumerate(three_params):
ax[jj].plot(time, my_simulation.par_dict[par])
ax[jj].set_ylabel(par)
ax[jj].set_xlabel('time (s)')
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'])
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'] * 0)
ax.set_xlabel('x_earth')
ax.set_ylabel('y_earth')
ax.set_zlabel('z_earth')
plt.show()
| mit |
chaen/DIRAC | Core/Utilities/Graphs/GraphUtilities.py | 6 | 14279 | """ GraphUtilities is a a collection of utility functions and classes used
in the DIRAC Graphs package.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
import os
import time
import datetime
import calendar
import math
import pytz
import numpy
from matplotlib.ticker import ScalarFormatter
from matplotlib.dates import AutoDateLocator, AutoDateFormatter, DateFormatter, RRuleLocator, \
rrulewrapper, HOURLY, MINUTELY, SECONDLY, YEARLY, MONTHLY, DAILY
from dateutil.relativedelta import relativedelta
def evalPrefs( *args, **kw ):
""" Interpret arguments as preferencies dictionaries or key-value pairs. The overriding order
is right most - most important one. Returns a single dictionary of preferencies
"""
prefs = {}
for pDict in list( args ) + [kw]:
if isinstance(pDict, dict):
for key in pDict:
if key == "metadata":
for mkey in pDict[key]:
prefs[mkey] = pDict[key][mkey]
else:
prefs[key] = pDict[key]
return prefs
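# Illustrative usage (added example): later arguments override earlier ones and
# keyword arguments override everything, while any 'metadata' sub-dictionary is
# flattened into the resulting preferences.
def _examplePrefs():
  defaults = { 'width':800, 'height':600 }
  perGraph = { 'metadata':{ 'title':'CPU used' }, 'height':400 }
  # -> { 'width':1000, 'height':400, 'title':'CPU used' }
  return evalPrefs( defaults, perGraph, width = 1000 )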
def pixelToPoint( size, dpi ):
""" Convert size expressed in pixels into points for a given dpi resolution
"""
return float( size ) * 100. / float( dpi )
datestrings = ['%x %X', '%x', '%Y-%m-%d %H:%M:%S']
def convert_to_datetime( dstring ):
orig_string = str( dstring )
try:
if isinstance( dstring, datetime.datetime ):
results = dstring
else:
results = eval( str( dstring ), {'__builtins__':None, 'time':time, 'math':math}, {} )
if isinstance(results, (int, float)):
results = datetime.datetime.fromtimestamp( int( results ) )
elif isinstance( results, datetime.datetime ):
pass
else:
raise ValueError( "Unknown datetime type!" )
except Exception as e:
t = None
for dateformat in datestrings:
try:
t = time.strptime(dstring, dateformat)
timestamp = calendar.timegm( t ) #-time.timezone
results = datetime.datetime.fromtimestamp( timestamp )
break
except:
pass
if t is None:
try:
dstring = dstring.split('.', 1)[0]
t = time.strptime(dstring, dateformat)
timestamp = time.mktime( t ) #-time.timezone
results = datetime.datetime.fromtimestamp( timestamp )
except:
raise ValueError( "Unable to create time from string!\nExpecting " \
"format of: '12/06/06 12:54:67'\nRecieved:%s" % orig_string )
return results
def to_timestamp( val ):
try:
v = float( val )
if v > 1000000000 and v < 1900000000:
return v
except:
pass
val = convert_to_datetime( val )
#return calendar.timegm( val.timetuple() )
return time.mktime( val.timetuple() )
# If the graph has more than `hour_switch` minutes, we print
# out hours in the subtitle.
hour_switch = 7
# If the graph has more than `day_switch` hours, we print
# out days in the subtitle.
day_switch = 7
# If the graph has more than `week_switch` days, we print
# out the weeks in the subtitle.
week_switch = 7
def add_time_to_title( begin, end, metadata = {} ):
""" Given a title and two times, adds the time info to the title.
Example results::
"Number of Attempted Transfers
(24 Hours from 4:45 12-14-2006 to 5:56 12-15-2006)"
There are two important pieces to the subtitle we add - the duration
(i.e., '48 Hours') and the time interval (i.e., 11:00 07-02-2007 to
11:00 07-04-2007).
We attempt to make the duration match the size of the span (for a bar
graph, this would be the width of the individual bar) in order for it
to make the most sense. The formatting of the time interval is based
upon how much real time there is from the beginning to the end.
We made the distinction because some would want to show graphs
representing 168 Hours, but needed the format to show the date as
well as the time.
"""
if 'span' in metadata:
interval = metadata['span']
else:
interval = time_interval( begin, end )
formatting_interval = time_interval( begin, end )
if formatting_interval == 600:
format_str = '%H:%M:%S'
elif formatting_interval == 3600:
format_str = '%Y-%m-%d %H:%M'
elif formatting_interval == 86400:
format_str = '%Y-%m-%d'
elif formatting_interval == 86400 * 7:
format_str = 'Week %U of %Y'
if interval < 600:
format_name = 'Seconds'
time_slice = 1
elif interval < 3600 and interval >= 600:
format_name = 'Minutes'
time_slice = 60
elif interval >= 3600 and interval < 86400:
format_name = 'Hours'
time_slice = 3600
elif interval >= 86400 and interval < 86400 * 7:
format_name = 'Days'
time_slice = 86400
elif interval >= 86400 * 7:
format_name = 'Weeks'
time_slice = 86400 * 7
else:
format_str = '%x %X'
format_name = 'Seconds'
time_slice = 1
begin_tuple = time.localtime( begin )
end_tuple = time.localtime( end )
added_title = '%i %s from ' % ( int( ( end - begin ) / time_slice ), format_name )
added_title += time.strftime( '%s to' % format_str, begin_tuple )
if time_slice < 86400:
add_utc = ' UTC'
else:
add_utc = ''
added_title += time.strftime( ' %s%s' % ( format_str, add_utc ), end_tuple )
return added_title
def time_interval( begin, end ):
"""
Determine the appropriate time interval based upon the length of
time as indicated by the `starttime` and `endtime` keywords.
"""
if end - begin < 600 * hour_switch:
return 600
if end - begin < 86400 * day_switch:
return 3600
elif end - begin < 86400 * 7 * week_switch:
return 86400
else:
return 86400 * 7
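# Worked example (added for illustration) of the switch points above: a span of
# one hour is binned in 600 s slices, three days in hourly slices, three weeks
# in daily slices and ten weeks in weekly slices.
def _exampleTimeInterval():
  hour, day, week = 3600, 86400, 7 * 86400
  assert time_interval( 0, 1 * hour ) == 600
  assert time_interval( 0, 3 * day ) == 3600
  assert time_interval( 0, 3 * week ) == 86400
  assert time_interval( 0, 10 * week ) == 86400 * 7
  return True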
def comma_format( x_orig ):
x = float( x_orig )
if x >= 1000:
after_comma = x % 1000
before_comma = int( x ) / 1000
return '%s,%03g' % ( comma_format( before_comma ), after_comma )
else:
return str( x_orig )
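# Added example: comma_format groups integers into blocks of three digits
# recursively; values below 1000 are returned unchanged.
def _exampleCommaFormat():
  # 950 -> '950', 12345678 -> '12,345,678' (under Python 2 integer division)
  return comma_format( 950 ), comma_format( 12345678 )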
class PrettyScalarFormatter( ScalarFormatter ):
def _set_orderOfMagnitude( self, range ):
# if scientific notation is to be used, find the appropriate exponent
# if using an numerical offset, find the exponent after applying the offset
locs = numpy.absolute( self.locs )
if self.offset: oom = math.floor( math.log10( range ) )
else:
if locs[0] > locs[-1]:
val = locs[0]
else:
val = locs[-1]
if val == 0:
oom = 0
else:
oom = math.floor( math.log10( val ) )
if oom <= -7:
self.orderOfMagnitude = oom
elif oom >= 9:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def pprint_val( self, x ):
pstring = ScalarFormatter.pprint_val( self, x )
return comma_format( pstring )
class PrettyDateFormatter( AutoDateFormatter ):
""" This class provides a formatter which conforms to the
      desired date formats for the Phedex system.
"""
def __init__( self, locator ):
tz = pytz.timezone( 'UTC' )
AutoDateFormatter.__init__( self, locator, tz = tz )
def __call__( self, x, pos = 0 ):
scale = float( self._locator._get_unit() )
if scale == 365.0:
self._formatter = DateFormatter( "%Y", self._tz )
elif scale == 30.0:
self._formatter = DateFormatter( "%b %Y", self._tz )
elif ( scale >= 1.0 ) and ( scale <= 7.0 ):
self._formatter = DateFormatter( "%Y-%m-%d", self._tz )
elif scale == ( 1.0 / 24.0 ):
self._formatter = DateFormatter( "%H:%M", self._tz )
elif scale == ( 1.0 / ( 24 * 60 ) ):
self._formatter = DateFormatter( "%H:%M", self._tz )
elif scale == ( 1.0 / ( 24 * 3600 ) ):
self._formatter = DateFormatter( "%H:%M:%S", self._tz )
else:
self._formatter = DateFormatter( "%b %d %Y %H:%M:%S", self._tz )
return self._formatter( x, pos )
class PrettyDateLocator( AutoDateLocator ):
def get_locator( self, dmin, dmax ):
'pick the best locator based on a distance'
delta = relativedelta( dmax, dmin )
numYears = ( delta.years * 1.0 )
numMonths = ( numYears * 12.0 ) + delta.months
numDays = ( numMonths * 31.0 ) + delta.days
numHours = ( numDays * 24.0 ) + delta.hours
numMinutes = ( numHours * 60.0 ) + delta.minutes
numSeconds = ( numMinutes * 60.0 ) + delta.seconds
numticks = 5
# self._freq = YEARLY
interval = 1
bymonth = 1
bymonthday = 1
byhour = 0
byminute = 0
bysecond = 0
if numYears >= numticks:
self._freq = YEARLY
elif numMonths >= numticks:
self._freq = MONTHLY
bymonth = range( 1, 13 )
if ( 0 <= numMonths ) and ( numMonths <= 14 ):
interval = 1 # show every month
elif ( 15 <= numMonths ) and ( numMonths <= 29 ):
interval = 3 # show every 3 months
elif ( 30 <= numMonths ) and ( numMonths <= 44 ):
interval = 4 # show every 4 months
else: # 45 <= numMonths <= 59
interval = 6 # show every 6 months
elif numDays >= numticks:
self._freq = DAILY
bymonth = None
bymonthday = range( 1, 32 )
if ( 0 <= numDays ) and ( numDays <= 9 ):
interval = 1 # show every day
elif ( 10 <= numDays ) and ( numDays <= 19 ):
interval = 2 # show every 2 days
elif ( 20 <= numDays ) and ( numDays <= 35 ):
interval = 3 # show every 3 days
elif ( 36 <= numDays ) and ( numDays <= 80 ):
interval = 7 # show every 1 week
else: # 81 <= numDays <= ~155
interval = 14 # show every 2 weeks
elif numHours >= numticks:
self._freq = HOURLY
bymonth = None
bymonthday = None
byhour = range( 0, 24 ) # show every hour
if ( 0 <= numHours ) and ( numHours <= 14 ):
interval = 1 # show every hour
elif ( 15 <= numHours ) and ( numHours <= 30 ):
interval = 2 # show every 2 hours
elif ( 30 <= numHours ) and ( numHours <= 45 ):
interval = 3 # show every 3 hours
elif ( 45 <= numHours ) and ( numHours <= 68 ):
interval = 4 # show every 4 hours
elif ( 68 <= numHours ) and ( numHours <= 90 ):
interval = 6 # show every 6 hours
else: # 90 <= numHours <= 120
interval = 12 # show every 12 hours
elif numMinutes >= numticks:
self._freq = MINUTELY
bymonth = None
bymonthday = None
byhour = None
byminute = range( 0, 60 )
if numMinutes > ( 10.0 * numticks ):
interval = 10
# end if
elif numSeconds >= numticks:
self._freq = SECONDLY
bymonth = None
bymonthday = None
byhour = None
byminute = None
bysecond = range( 0, 60 )
if numSeconds > ( 10.0 * numticks ):
interval = 10
# end if
else:
# do what?
# microseconds as floats, but floats from what reference point?
pass
rrule = rrulewrapper( self._freq, interval = interval, \
dtstart = dmin, until = dmax, \
bymonth = bymonth, bymonthday = bymonthday, \
byhour = byhour, byminute = byminute, \
bysecond = bysecond )
locator = RRuleLocator( rrule, self.tz )
locator.set_axis( self.axis )
locator.set_view_interval( *self.axis.get_view_interval() )
locator.set_data_interval( *self.axis.get_data_interval() )
return locator
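# Minimal wiring sketch for the pretty date classes above (illustrative only;
# 'ax' is assumed to be an existing matplotlib Axes plotting datetime values):
#
#     locator = PrettyDateLocator()
#     formatter = PrettyDateFormatter(locator)
#     ax.xaxis.set_major_locator(locator)
#     ax.xaxis.set_major_formatter(formatter)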
def pretty_float( num ):
if num > 1000:
return comma_format( int( num ) )
try:
floats = int( max( 2 - max( numpy.floor( numpy.log( abs( num ) + 1e-3 ) / numpy.log( 10. ) ), 0 ), 0 ) )
except:
floats = 2
format = "%." + str( floats ) + "f"
if isinstance(num, tuple):
return format % float( num[0] )
else:
try:
retval = format % float( num )
except:
raise Exception( "Unable to convert %s into a float." % ( str( num ) ) )
return retval
def statistics( results, span = None, is_timestamp = False ):
results = dict( results )
if span != None:
parsed_data = {}
min_key = min( results.keys() )
max_key = max( results.keys() )
for i in range( min_key, max_key + span, span ):
if i in results:
parsed_data[i] = results[i]
del results[i]
else:
parsed_data[i] = 0.0
if len( results ) > 0:
raise Exception( "Unable to use all the values for the statistics" )
else:
parsed_data = results
values = parsed_data.values()
data_min = min( values )
data_max = max( values )
data_avg = numpy.average( values )
if is_timestamp:
current_time = max( parsed_data.keys() )
data_current = parsed_data[ current_time ]
return data_min, data_max, data_avg, data_current
else:
return data_min, data_max, data_avg
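# Illustrative call (added example): with evenly spaced timestamps as keys,
#   statistics({0: 1.0, 60: 3.0, 120: 2.0}, span=60) -> (1.0, 3.0, 2.0)
# i.e. (minimum, maximum, average); with is_timestamp=True the value at the
# latest key is returned as a fourth element.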
def makeDataFromCSV( csv ):
""" Generate plot data dictionary from a csv file or string
"""
if os.path.exists( csv ):
with open( csv, 'r' ) as fdata:
flines = fdata.readlines()
else:
flines = csv.split( '\n' )
graph_data = {}
labels = flines[0].strip().split( ',' )
if len( labels ) == 2:
# simple plot data
for line in flines:
line = line.strip()
if line and line[0] != '#':
key, value = line.split( ',' )
graph_data[key] = value
elif len( flines ) == 2:
values = flines[1].strip().split( ',' )
for key,value in zip(labels,values):
graph_data[key] = value
elif len( labels ) > 2:
# stacked graph data
del labels[0]
del flines[0]
for label in labels:
plot_data = {}
index = labels.index( label ) + 1
for line in flines:
values = line.strip().split( ',' )
value = values[index].strip()
#if value:
plot_data[values[0]] = values[index]
#else:
#plot_data[values[0]] = '0.'
#pass
graph_data[label] = dict( plot_data )
return graph_data
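# Expected CSV layouts (an assumption inferred from the parsing logic above,
# not taken from project documentation):
#
#   simple two-column data (comment lines start with '#'):
#       #time,value
#       1230768000,42.0
#       1230768600,40.5
#
#   stacked data (first header entry names the key column, the rest are labels):
#       time,series_a,series_b
#       1230768000,1.0,2.0
#       1230768600,1.5,2.5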
def darkenColor( color, factor=2 ):
c1 = int( color[1:3], 16 )
c2 = int( color[3:5], 16 )
c3 = int( color[5:7], 16 )
c1 /= factor
c2 /= factor
c3 /= factor
result = '#' + (str( hex( c1) ).replace( '0x', '' ).zfill( 2 ) +
str( hex( c2) ).replace( '0x', '' ).zfill( 2 ) +
str( hex( c3) ).replace( '0x', '' ).zfill( 2 ) )
return result
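# Example (added note): darkenColor('#ff8800') halves each RGB channel and
# returns '#7f4400'; the integer halving relies on Python 2 division semantics.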
| gpl-3.0 |
nesterione/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
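# Sketch of how a further delegating meta-estimator could be registered
# (illustrative only, not part of the original test suite; a nested Pipeline
# is used purely as an example):
#
#     DELEGATING_METAESTIMATORS.append(
#         DelegatorData('NestedPipeline',
#                       lambda est: Pipeline([('outer',
#                                              Pipeline([('est', est)]))])))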
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
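# Added note: because `wrapper` is a property that raises AttributeError when
# the wrapped method is configured as hidden, hasattr(sub_estimator, method)
# returns False for that method, which is what the assertions below rely on.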
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
kazemakase/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
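# Added note: each 8x8 digit image is flattened to 64 features, the
# agglomeration merges them into 32 clusters, and inverse_transform assigns
# every original pixel the value of its cluster, so X_restored is again
# (n_samples, 64) and can be reshaped back to image form.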
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
DoddyPhysics/AxionNet | naxion.py | 1 | 6909 | """
This is the workhorse of the code. It takes a model and solves its equations of motion,
then computes outputs and quasi-observables.
There are some plotting scripts too.
DJEM+MS+CP+LP, 2017
"""
import scipy.integrate as integrate
import numpy as np
import numpy.random as rd
import ConfigParser
import eoms as eoms
import output as output
import model_class
class hubble_calculator(object):
def __init__(self,fname='configuration_card.ini',ifsampling=False,mnum=None,hypervec=None,init_Kdiag=True,remove_masses=True):
""" Initialise the object. Default uses configuration card.
If you use ifsampling, then mnum and hypervec are required, otherwise they are ignored.
A thing that needs fixing here: it reads the config card on every step of an MCMC, so if you change it
during a run, it affects the run. """
myModel = model_class.ModelClass(fname=fname,ifsampling=ifsampling,init_Kdiag=init_Kdiag,remove_masses=remove_masses,
mnum=mnum,hypervec=hypervec)
# Hard code the critical density and baryon density
self.rho_crit=3.
self.ombh2=0.022
self.n,self.ma_array,self.phiin_array,self.phidotin_array=myModel.getParams()
self.rho_m0,self.rhol,self.rho_r0=myModel.cosmo()
self.ain,self.tin,self.tfi=myModel.inits()
self.N,self.n_cross=myModel.evolution()
# convert to integers
self.N=int(self.N)
self.n_cross=int(self.n_cross)
self.n=int(self.n)
self.crossing_index=[0]*self.n
# use this if output = output_new
self.crossing_array=np.zeros((self.N,self.n))
self.rhoin_array = eoms.rhoinitial(self.phidotin_array, self.phiin_array, self.ma_array, self.n)
self.y0 = eoms.yinitial(self.n,self.phiin_array,self.phidotin_array,self.rhoin_array,self.ain)
def eq(self,y,t):
"""Equations of motion."""
#return eoms.deriv_wfromphi(y, t, self.n, self.n_cross,self.ma_array, self.rho_m0, self.rho_r0, self.rhol)
return eoms.deriv_wfromphi(y, t, self.n, self.n_cross,self.crossing_index,self.ma_array, self.rho_m0, self.rho_r0, self.rhol)
def solver(self):
"""Solve the equations of motion with initial and final time set by class attributes."""
self.t = np.logspace(np.log10(self.tin),np.log10(self.tfi),self.N)
self.y = integrate.odeint(self.eq, self.y0, self.t, mxstep=100000000)
def output(self):
"""Obtain some derived quantities from the time steps."""
self.rhoa = output.axionrho(self.y,self.N,self.n)
self.rhom, self.rhor = output.dense(self.rho_m0,self.rho_r0,self.N,self.y)
self.rholl = output.clambda(self.rhol,self.N)
self.rhosum = output.totalrho(self.rhom, self.rholl, self.rhor, self.rhoa, self.N)
self.P, self.Psum = output.pressure(self.y,self.ma_array,self.N,self.n,self.rhom,self.rhol,self.rhor)
self.w=output.w(self.P,self.rhoa,self.N)
self.H = output.hubble(self.t, self.rhosum)
self.z = output.redshift(self.y, self.N)
self.rhoo, self.rhon = output.darkflow(self.rhom, self.rhor, self.rhol, self.rhoa, self.ma_array, self.rhosum, self.n, self.y,self.N)
self.a = output.scalefactor(self.y,self.N)
self.add = output.accel(self.a,self.N,self.rhosum,self.Psum)
self.omegar, self.omegam = output.omegas(self.rhor,self.rhom,self.rhosum,self.N)
self.phi = output.axionphi(self.y,self.N)
self.phid = output.axionphidot(self.y,self.N)
return self.z,self.H,self.w
def phiplot(self):
"""
Call this to plot phis in test_sampler
"""
import matplotlib.pyplot as plt
for i in range(self.n):
plt.plot(self.t,self.y[:,3*i])
plt.xscale('log')
plt.show()
def rhoplot(self):
"""
Call this to plot rhos in test_sampler
"""
import matplotlib.pyplot as plt
self.rhoDMa,self.rhoDEa=output.darkflow(self.y,self.N,self.n)
self.rhom, self.rhor = output.dense(self.rho_m0,self.rho_r0,self.N,self.y)
self.rholl = output.clambda(self.rhol,self.N)
self.z = output.redshift(self.y, self.N)
inds=np.where(self.z<0)[0]
if np.shape(inds)[0]==0:
last=np.size(self.z)
else:
last=inds[0]
self.a=output.scalefactor(self.y,self.N)
avec=self.a[:last]
plt.plot(avec,self.rhoDMa[:last],'-k',linewidth=2.)
plt.plot(avec,self.rhoDEa[:last],'--k',linewidth=2.)
plt.plot(avec,self.rhor[:last],'-r',linewidth=2.)
plt.plot(avec,self.rhom[:last],'-b',linewidth=2.)
plt.plot(avec,self.rholl[:last],'-g',linewidth=2.)
plt.xscale('log')
plt.yscale('log')
plt.ylim([1.e-5,1.e28])
plt.show()
def quasiObs(self):
""" A very simple output of quasi-observables for MCMC """
# First find z=0, should probably raise an exception in case this is not found
self.z = output.redshift(self.y, self.N)
#pos = len(self.z[self.z>=0])-1 # last positive z index
inds=np.where(self.z<0)[0] # first negative z index
if np.shape(inds)[0]==0:
    # return dummy values if z=0 is not found
    print 'z=0 was not found, returning dummy values that will fail likelihood'
    return 100.,100.,-1.,100.
pos=inds[0]
# Get all the densities and pressures for acceleration and H
self.a=output.scalefactor(self.y,self.N)
self.rhoa = output.axionrho(self.y,self.N,self.n)
self.rhom, self.rhor = output.dense(self.rho_m0,self.rho_r0,self.N,self.y)
self.rholl = output.clambda(self.rhol,self.N)
self.rhosum = output.totalrho(self.rhom, self.rholl, self.rhor, self.rhoa, self.N)
self.P, self.Psum = output.pressure(self.y,self.ma_array,self.N,self.n,self.rhom,self.rholl,self.rhor)
self.H=output.hubble(self.t, self.rhosum)
self.add = output.accel(self.a,self.N,self.rhosum,self.Psum)
# H0 and \ddot{a}
self.H0=self.H[pos]
self.add0=self.add[pos]
# Split the axion density into DM and DE
self.rhoDMa,self.rhoDEa=output.darkflow(self.y,self.N,self.n)
# Subtract the baryons to get the CDM density
self.rhoCDM=self.rhom-self.rho_crit*self.ombh2*(1.+self.z)**3.
self.totM=self.rhom+self.rhoDMa
self.totDM=self.rhoCDM+self.rhoDMa
self.totDE=self.rholl+self.rhoDEa
# Compute Omch2 and OmM
self.rhor0=self.rhor[pos]
self.rhom0=self.totM[pos]
self.rhoDE0=self.totDE[pos]
self.Omch2=self.totDM[pos]/self.rho_crit
self.OmM=self.rhom0/(self.rhom0+self.rhoDE0+self.rhor0)
# Equality
self.zeq=output.zeq(self.z,self.totM,self.rhor)
return self.H0,self.OmM,self.add0,self.zeq # use this is Om is quasi-obs
#return self.H0,self.Omch2,self.add0,self.zeq # use this if Omch2 is quasi-obs
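# Typical interactive use (a sketch based on the methods above; assumes a
# valid 'configuration_card.ini' in the working directory):
#
#     calc = hubble_calculator()
#     calc.solver()
#     z, H, w = calc.output()
#     H0, OmM, add0, zeq = calc.quasiObs()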
############################
# Main routine runs on import
# I don't use this for MCMC, and I run tests from test_sampler script.
##########################
#def main():
# if len(argv)<1:
# raise Exception('Need to specify the configuration file name via a command line argument.')
# config_fname = argv[1]
#Initialize calculator, which diagonalizes mass/KE matrices, etc
# my_calculator = hubble_calculator(configname=config_fname)
#Solve ODEs from tin to tfi
# my_calculator.solver()
#Save some information
# my_calculator.output()
#Make a plot
# my_calculator.printout()
#if __name__ == "__main__":
# main()
| mit |
ocefpaf/cartopy | lib/cartopy/io/ogc_clients.py | 1 | 35567 | # (C) British Crown Copyright 2014 - 2019, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Implements RasterSource classes which can retrieve imagery from web services
such as WMS and WMTS.
The matplotlib interface can make use of RasterSources via the
:meth:`cartopy.mpl.geoaxes.GeoAxes.add_raster` method,
with additional specific methods which make use of this for WMS and WMTS
(:meth:`~cartopy.mpl.geoaxes.GeoAxes.add_wms` and
:meth:`~cartopy.mpl.geoaxes.GeoAxes.add_wmts`). An example of using WMTS in
this way can be found at :ref:`sphx_glr_gallery_wmts.py`.
"""
from __future__ import (absolute_import, division, print_function)
import six
import collections
import io
import math
import warnings
import weakref
from xml.etree import ElementTree
from PIL import Image
import numpy as np
import shapely.geometry as sgeom
try:
from owslib.wms import WebMapService
from owslib.wfs import WebFeatureService
import owslib.util
import owslib.wmts
_OWSLIB_AVAILABLE = True
except ImportError:
WebMapService = None
WebFeatureService = None
_OWSLIB_AVAILABLE = False
import cartopy.crs as ccrs
from cartopy.io import LocatedImage, RasterSource
from cartopy.img_transform import warp_array
_OWSLIB_REQUIRED = 'OWSLib is required to use OGC web services.'
# Hardcode some known EPSG codes for now.
# The order given here determines the preferred SRS for WMS retrievals.
_CRS_TO_OGC_SRS = collections.OrderedDict(
[(ccrs.PlateCarree(), 'EPSG:4326'),
(ccrs.Mercator.GOOGLE, 'EPSG:900913'),
(ccrs.OSGB(approx=True), 'EPSG:27700')
])
# Standard pixel size of 0.28 mm as defined by WMTS.
METERS_PER_PIXEL = 0.28e-3
_WGS84_METERS_PER_UNIT = 2 * math.pi * 6378137 / 360
METERS_PER_UNIT = {
'urn:ogc:def:crs:EPSG::27700': 1,
'urn:ogc:def:crs:EPSG::900913': 1,
'urn:ogc:def:crs:OGC:1.3:CRS84': _WGS84_METERS_PER_UNIT,
'urn:ogc:def:crs:EPSG::3031': 1,
'urn:ogc:def:crs:EPSG::3413': 1,
'urn:ogc:def:crs:EPSG::3857': 1,
'urn:ogc:def:crs:EPSG:6.18.3:3857': 1
}
_URN_TO_CRS = collections.OrderedDict(
[('urn:ogc:def:crs:OGC:1.3:CRS84', ccrs.PlateCarree()),
('urn:ogc:def:crs:EPSG::4326', ccrs.PlateCarree()),
('urn:ogc:def:crs:EPSG::900913', ccrs.GOOGLE_MERCATOR),
('urn:ogc:def:crs:EPSG::27700', ccrs.OSGB(approx=True)),
('urn:ogc:def:crs:EPSG::3031', ccrs.Stereographic(
central_latitude=-90,
true_scale_latitude=-71)),
('urn:ogc:def:crs:EPSG::3413', ccrs.Stereographic(
central_longitude=-45,
central_latitude=90,
true_scale_latitude=70)),
('urn:ogc:def:crs:EPSG::3857', ccrs.GOOGLE_MERCATOR),
('urn:ogc:def:crs:EPSG:6.18.3:3857', ccrs.GOOGLE_MERCATOR)
])
# XML namespace definitions
_MAP_SERVER_NS = '{http://mapserver.gis.umn.edu/mapserver}'
_GML_NS = '{http://www.opengis.net/gml}'
def _warped_located_image(image, source_projection, source_extent,
output_projection, output_extent, target_resolution):
"""
Reproject an Image from one source-projection and extent to another.
Returns
-------
LocatedImage
A reprojected LocatedImage, the extent of which is >= the requested
'output_extent'.
"""
if source_projection == output_projection:
extent = output_extent
else:
# Convert Image to numpy array (flipping so that origin
# is 'lower').
img, extent = warp_array(np.asanyarray(image)[::-1],
source_proj=source_projection,
source_extent=source_extent,
target_proj=output_projection,
target_res=np.asarray(target_resolution,
dtype=int),
target_extent=output_extent,
mask_extrapolated=True)
# Convert arrays with masked RGB(A) values to non-masked RGBA
# arrays, setting the alpha channel to zero for masked values.
# This avoids unsightly grey boundaries appearing when the
# extent is limited (i.e. not global).
if np.ma.is_masked(img):
if img.shape[2:3] == (3,):
# RGB
old_img = img
img = np.zeros(img.shape[:2] + (4,), dtype=img.dtype)
img[:, :, 0:3] = old_img
img[:, :, 3] = ~ np.any(old_img.mask, axis=2)
if img.dtype.kind == 'u':
img[:, :, 3] *= 255
elif img.shape[2:3] == (4,):
# RGBA
img[:, :, 3] = np.where(np.any(img.mask, axis=2), 0,
img[:, :, 3])
img = img.data
# Convert warped image array back to an Image, undoing the
# earlier flip.
image = Image.fromarray(img[::-1])
return LocatedImage(image, extent)
def _target_extents(extent, requested_projection, available_projection):
"""
Translate the requested extent in the display projection into a list of
extents in the projection available from the service (multiple if it
crosses seams).
The extents are represented as (min_x, max_x, min_y, max_y).
"""
# Start with the requested area.
min_x, max_x, min_y, max_y = extent
target_box = sgeom.box(min_x, min_y, max_x, max_y)
# If the requested area (i.e. target_box) is bigger (or nearly bigger) than
# the entire output requested_projection domain, then we erode the request
# area to avoid re-projection instabilities near the projection boundary.
buffered_target_box = target_box.buffer(requested_projection.threshold,
resolution=1)
fudge_mode = buffered_target_box.contains(requested_projection.domain)
if fudge_mode:
target_box = requested_projection.domain.buffer(
-requested_projection.threshold)
# Translate the requested area into the server projection.
polys = available_projection.project_geometry(target_box,
requested_projection)
# Return the polygons' rectangular bounds as extent tuples.
target_extents = []
for poly in polys:
min_x, min_y, max_x, max_y = poly.bounds
if fudge_mode:
# If we shrunk the request area before, then here we
# need to re-inflate.
radius = min(max_x - min_x, max_y - min_y) / 5.0
radius = min(radius, available_projection.threshold * 15)
poly = poly.buffer(radius, resolution=1)
# Prevent the expanded request going beyond the
# limits of the requested_projection.
poly = available_projection.domain.intersection(poly)
min_x, min_y, max_x, max_y = poly.bounds
target_extents.append((min_x, max_x, min_y, max_y))
return target_extents
class WMSRasterSource(RasterSource):
"""
A WMS imagery retriever which can be added to a map.
Note
----
Requires owslib and Pillow to work.
No caching of retrieved maps is done with this WMSRasterSource.
To reduce load on the WMS server it is encouraged to tile
map requests and subsequently stitch them together to recreate
a single raster, thus allowing for a more aggressive caching scheme,
but this WMSRasterSource does not currently implement WMS tile
fetching.
Whilst not the same service, there is also a WMTSRasterSource which
makes use of tiles and comes with built-in caching for fast repeated
map retrievals.
"""
def __init__(self, service, layers, getmap_extra_kwargs=None):
"""
Parameters
----------
service: string or WebMapService instance
The WebMapService instance, or URL of a WMS service,
from whence to retrieve the image.
layers: string or list of strings
The name(s) of layers to use from the WMS service.
getmap_extra_kwargs: dict, optional
Extra keywords to pass through to the service's getmap method.
If None, a dictionary with ``{'transparent': True}`` will be
defined.
"""
if WebMapService is None:
raise ImportError(_OWSLIB_REQUIRED)
if isinstance(service, six.string_types):
service = WebMapService(service)
if isinstance(layers, six.string_types):
layers = [layers]
if getmap_extra_kwargs is None:
getmap_extra_kwargs = {'transparent': True}
if len(layers) == 0:
raise ValueError('One or more layers must be defined.')
for layer in layers:
if layer not in service.contents:
raise ValueError('The {!r} layer does not exist in '
'this service.'.format(layer))
#: The OWSLib WebMapService instance.
self.service = service
#: The names of the layers to fetch.
self.layers = layers
#: Extra kwargs passed through to the service's getmap request.
self.getmap_extra_kwargs = getmap_extra_kwargs
def _native_srs(self, projection):
# Return the SRS which corresponds to the given projection when
# known, otherwise return None.
return _CRS_TO_OGC_SRS.get(projection)
def _fallback_proj_and_srs(self):
"""
Return a :class:`cartopy.crs.Projection` and corresponding
SRS string in which the WMS service can supply the requested
layers.
"""
contents = self.service.contents
for proj, srs in six.iteritems(_CRS_TO_OGC_SRS):
missing = any(srs not in contents[layer].crsOptions for
layer in self.layers)
if not missing:
break
if missing:
raise ValueError('The requested layers are not available in a '
'known SRS.')
return proj, srs
def validate_projection(self, projection):
if self._native_srs(projection) is None:
self._fallback_proj_and_srs()
def _image_and_extent(self, wms_proj, wms_srs, wms_extent, output_proj,
output_extent, target_resolution):
min_x, max_x, min_y, max_y = wms_extent
wms_image = self.service.getmap(layers=self.layers,
srs=wms_srs,
bbox=(min_x, min_y, max_x, max_y),
size=target_resolution,
format='image/png',
**self.getmap_extra_kwargs)
wms_image = Image.open(io.BytesIO(wms_image.read()))
return _warped_located_image(wms_image, wms_proj, wms_extent,
output_proj, output_extent,
target_resolution)
def fetch_raster(self, projection, extent, target_resolution):
target_resolution = [int(np.ceil(val)) for val in target_resolution]
wms_srs = self._native_srs(projection)
if wms_srs is not None:
wms_proj = projection
wms_extents = [extent]
else:
# The SRS for the requested projection is not known, so
# attempt to use the fallback and perform the necessary
# transformations.
wms_proj, wms_srs = self._fallback_proj_and_srs()
# Calculate the bounding box(es) in WMS projection.
wms_extents = _target_extents(extent, projection, wms_proj)
located_images = []
for wms_extent in wms_extents:
located_images.append(self._image_and_extent(wms_proj, wms_srs,
wms_extent,
projection, extent,
target_resolution))
return located_images
class WMTSRasterSource(RasterSource):
"""
A WMTS imagery retriever which can be added to a map.
Uses tile caching for fast repeated map retrievals.
Note
----
Requires owslib and Pillow to work.
"""
_shared_image_cache = weakref.WeakKeyDictionary()
"""
A nested mapping from WMTS, layer name, tile matrix name, tile row
and tile column to the resulting PIL image::
{wmts: {(layer_name, tile_matrix_name): {(row, column): Image}}}
This provides a significant boost when producing multiple maps of the
same projection or with an interactive figure.
"""
def __init__(self, wmts, layer_name, gettile_extra_kwargs=None):
"""
Parameters
----------
wmts
The URL of the WMTS, or an owslib.wmts.WebMapTileService instance.
layer_name
The name of the layer to use.
gettile_extra_kwargs: dict, optional
Extra keywords (e.g. time) to pass through to the
service's gettile method.
"""
if WebMapService is None:
raise ImportError(_OWSLIB_REQUIRED)
if not (hasattr(wmts, 'tilematrixsets') and
hasattr(wmts, 'contents') and
hasattr(wmts, 'gettile')):
wmts = owslib.wmts.WebMapTileService(wmts)
try:
layer = wmts.contents[layer_name]
except KeyError:
raise ValueError('Invalid layer name {!r} for WMTS at {!r}'.format(
layer_name, wmts.url))
#: The OWSLib WebMapTileService instance.
self.wmts = wmts
#: The layer to fetch.
self.layer = layer
#: Extra kwargs passed through to the service's gettile request.
if gettile_extra_kwargs is None:
gettile_extra_kwargs = {}
self.gettile_extra_kwargs = gettile_extra_kwargs
self._matrix_set_name_map = {}
def _matrix_set_name(self, target_projection):
key = id(target_projection)
matrix_set_name = self._matrix_set_name_map.get(key)
if matrix_set_name is None:
if hasattr(self.layer, 'tilematrixsetlinks'):
matrix_set_names = self.layer.tilematrixsetlinks.keys()
else:
matrix_set_names = self.layer.tilematrixsets
def find_projection(match_projection):
result = None
for tile_matrix_set_name in matrix_set_names:
matrix_sets = self.wmts.tilematrixsets
tile_matrix_set = matrix_sets[tile_matrix_set_name]
crs_urn = tile_matrix_set.crs
tms_crs = _URN_TO_CRS.get(crs_urn)
if tms_crs == match_projection:
result = tile_matrix_set_name
break
return result
# First search for a matrix set in the target projection.
matrix_set_name = find_projection(target_projection)
if matrix_set_name is None:
# Search instead for a set in _any_ projection we can use.
for possible_projection in _URN_TO_CRS.values():
# Look for supported projections (in a preferred order).
matrix_set_name = find_projection(possible_projection)
if matrix_set_name is not None:
break
if matrix_set_name is None:
# Fail completely.
available_urns = sorted(set(
self.wmts.tilematrixsets[name].crs
for name in matrix_set_names))
msg = 'Unable to find tile matrix for projection.'
msg += '\n Projection: ' + str(target_projection)
msg += '\n Available tile CRS URNs:'
msg += '\n ' + '\n '.join(available_urns)
raise ValueError(msg)
self._matrix_set_name_map[key] = matrix_set_name
return matrix_set_name
def validate_projection(self, projection):
self._matrix_set_name(projection)
def fetch_raster(self, projection, extent, target_resolution):
matrix_set_name = self._matrix_set_name(projection)
wmts_projection = _URN_TO_CRS[
self.wmts.tilematrixsets[matrix_set_name].crs]
if wmts_projection == projection:
wmts_extents = [extent]
else:
# Calculate (possibly multiple) extents in the given projection.
wmts_extents = _target_extents(extent, projection, wmts_projection)
# Bump resolution by a small factor, as a weak alternative to
# delivering a minimum projected resolution.
# Generally, the desired area is smaller than the enclosing extent
# in projection space and may have varying scaling, so the ideal
# solution is a hard problem !
resolution_factor = 1.4
target_resolution = np.array(target_resolution) * resolution_factor
width, height = target_resolution
located_images = []
for wmts_desired_extent in wmts_extents:
# Calculate target resolution for the actual polygon. Note that
# this gives *every* polygon enough pixels for the whole result,
# which is potentially excessive!
min_x, max_x, min_y, max_y = wmts_desired_extent
if wmts_projection == projection:
max_pixel_span = min((max_x - min_x) / width,
(max_y - min_y) / height)
else:
# X/Y orientation is arbitrary, so use a worst-case guess.
max_pixel_span = (min(max_x - min_x, max_y - min_y) /
max(width, height))
# Fetch a suitable image and its actual extent.
wmts_image, wmts_actual_extent = self._wmts_images(
self.wmts, self.layer, matrix_set_name,
extent=wmts_desired_extent,
max_pixel_span=max_pixel_span)
# Return each (image, extent) as a LocatedImage.
if wmts_projection == projection:
located_image = LocatedImage(wmts_image, wmts_actual_extent)
else:
# Reproject the image to the desired projection.
located_image = _warped_located_image(
wmts_image,
wmts_projection, wmts_actual_extent,
output_projection=projection, output_extent=extent,
target_resolution=target_resolution)
located_images.append(located_image)
return located_images
def _choose_matrix(self, tile_matrices, meters_per_unit, max_pixel_span):
# Get the tile matrices in order of increasing resolution.
tile_matrices = sorted(tile_matrices,
key=lambda tm: tm.scaledenominator,
reverse=True)
# Find which tile matrix has the appropriate resolution.
max_scale = max_pixel_span * meters_per_unit / METERS_PER_PIXEL
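# Added note: WMTS scale denominators are defined against a standard
# 0.28 mm pixel, so a matrix is usable when
# scaledenominator * METERS_PER_PIXEL / meters_per_unit <= max_pixel_span,
# i.e. scaledenominator <= max_scale.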
for tm in tile_matrices:
if tm.scaledenominator <= max_scale:
return tm
return tile_matrices[-1]
def _tile_span(self, tile_matrix, meters_per_unit):
pixel_span = (tile_matrix.scaledenominator *
(METERS_PER_PIXEL / meters_per_unit))
tile_span_x = tile_matrix.tilewidth * pixel_span
tile_span_y = tile_matrix.tileheight * pixel_span
return tile_span_x, tile_span_y
def _select_tiles(self, tile_matrix, tile_matrix_limits,
tile_span_x, tile_span_y, extent):
# Convert the requested extent from CRS coordinates to tile
# indices. See annex H of the WMTS v1.0.0 spec.
# NB. The epsilons get rid of any tiles which only just
# (i.e. one part in a million) intrude into the requested
# extent. Since these wouldn't be visible anyway there's nothing
# to be gained by spending the time downloading them.
min_x, max_x, min_y, max_y = extent
matrix_min_x, matrix_max_y = tile_matrix.topleftcorner
epsilon = 1e-6
min_col = int((min_x - matrix_min_x) / tile_span_x + epsilon)
max_col = int((max_x - matrix_min_x) / tile_span_x - epsilon)
min_row = int((matrix_max_y - max_y) / tile_span_y + epsilon)
max_row = int((matrix_max_y - min_y) / tile_span_y - epsilon)
# Clamp to the limits of the tile matrix.
min_col = max(min_col, 0)
max_col = min(max_col, tile_matrix.matrixwidth - 1)
min_row = max(min_row, 0)
max_row = min(max_row, tile_matrix.matrixheight - 1)
# Clamp to any layer-specific limits on the tile matrix.
if tile_matrix_limits:
min_col = max(min_col, tile_matrix_limits.mintilecol)
max_col = min(max_col, tile_matrix_limits.maxtilecol)
min_row = max(min_row, tile_matrix_limits.mintilerow)
max_row = min(max_row, tile_matrix_limits.maxtilerow)
return min_col, max_col, min_row, max_row
def _wmts_images(self, wmts, layer, matrix_set_name, extent,
max_pixel_span):
"""
Add images from the specified WMTS layer and matrix set to cover
the specified extent at an appropriate resolution.
The zoom level (aka. tile matrix) is chosen to give the lowest
possible resolution which still provides the requested quality.
If insufficient resolution is available, the highest available
resolution is used.
Parameters
----------
wmts
The owslib.wmts.WebMapTileService providing the tiles.
layer
The owslib.wmts.ContentMetadata (aka. layer) to draw.
matrix_set_name
The name of the matrix set to use.
extent
Tuple of (left, right, bottom, top) in Axes coordinates.
max_pixel_span
Preferred maximum pixel width or height in Axes coordinates.
"""
# Find which tile matrix has the appropriate resolution.
tile_matrix_set = wmts.tilematrixsets[matrix_set_name]
tile_matrices = tile_matrix_set.tilematrix.values()
meters_per_unit = METERS_PER_UNIT[tile_matrix_set.crs]
tile_matrix = self._choose_matrix(tile_matrices, meters_per_unit,
max_pixel_span)
# Determine which tiles are required to cover the requested extent.
tile_span_x, tile_span_y = self._tile_span(tile_matrix,
meters_per_unit)
tile_matrix_set_links = getattr(layer, 'tilematrixsetlinks', None)
if tile_matrix_set_links is None:
tile_matrix_limits = None
else:
tile_matrix_set_link = tile_matrix_set_links[matrix_set_name]
tile_matrix_limits = tile_matrix_set_link.tilematrixlimits.get(
tile_matrix.identifier)
min_col, max_col, min_row, max_row = self._select_tiles(
tile_matrix, tile_matrix_limits, tile_span_x, tile_span_y, extent)
# Find the relevant section of the image cache.
tile_matrix_id = tile_matrix.identifier
cache_by_wmts = WMTSRasterSource._shared_image_cache
cache_by_layer_matrix = cache_by_wmts.setdefault(wmts, {})
image_cache = cache_by_layer_matrix.setdefault((layer.id,
tile_matrix_id), {})
# To avoid nasty seams between the individual tiles, we
# accumulate the tile images into a single image.
big_img = None
n_rows = 1 + max_row - min_row
n_cols = 1 + max_col - min_col
# Ignore out-of-range errors if the current version of OWSLib
# doesn't provide the regional information.
ignore_out_of_range = tile_matrix_set_links is None
for row in range(min_row, max_row + 1):
for col in range(min_col, max_col + 1):
# Get the tile's Image from the cache if possible.
img_key = (row, col)
img = image_cache.get(img_key)
if img is None:
try:
tile = wmts.gettile(
layer=layer.id,
tilematrixset=matrix_set_name,
tilematrix=str(tile_matrix_id),
row=str(row), column=str(col),
**self.gettile_extra_kwargs)
except owslib.util.ServiceException as exception:
if ('TileOutOfRange' in exception.message and
ignore_out_of_range):
continue
raise exception
img = Image.open(io.BytesIO(tile.read()))
image_cache[img_key] = img
if big_img is None:
size = (img.size[0] * n_cols, img.size[1] * n_rows)
big_img = Image.new('RGBA', size, (255, 255, 255, 255))
top = (row - min_row) * tile_matrix.tileheight
left = (col - min_col) * tile_matrix.tilewidth
big_img.paste(img, (left, top))
if big_img is None:
img_extent = None
else:
matrix_min_x, matrix_max_y = tile_matrix.topleftcorner
min_img_x = matrix_min_x + tile_span_x * min_col
max_img_y = matrix_max_y - tile_span_y * min_row
img_extent = (min_img_x, min_img_x + n_cols * tile_span_x,
max_img_y - n_rows * tile_span_y, max_img_y)
return big_img, img_extent
class WFSGeometrySource(object):
"""Web Feature Service (WFS) retrieval for Cartopy."""
def __init__(self, service, features, getfeature_extra_kwargs=None):
"""
Parameters
----------
service
The URL of a WFS, or an instance of
:class:`owslib.wfs.WebFeatureService`.
features
The typename(s) of the features from the WFS that
will be retrieved and made available as geometries.
getfeature_extra_kwargs: optional
Extra keyword args to pass to the service's `getfeature` call.
Defaults to None
"""
if WebFeatureService is None:
raise ImportError(_OWSLIB_REQUIRED)
if isinstance(service, six.string_types):
service = WebFeatureService(service)
if isinstance(features, six.string_types):
features = [features]
if getfeature_extra_kwargs is None:
getfeature_extra_kwargs = {}
if not features:
raise ValueError('One or more features must be specified.')
for feature in features:
if feature not in service.contents:
raise ValueError('The {!r} feature does not exist in this '
'service.'.format(feature))
self.service = service
self.features = features
self.getfeature_extra_kwargs = getfeature_extra_kwargs
self._default_urn = None
def default_projection(self):
"""
Return a :class:`cartopy.crs.Projection` in which the WFS
service can supply the requested features.
"""
# Using first element in crsOptions (default).
if self._default_urn is None:
default_urn = set(self.service.contents[feature].crsOptions[0] for
feature in self.features)
if len(default_urn) != 1:
raise ValueError('Failed to find a single common default SRS '
                 'across all features (typenames).')
else:
default_urn = default_urn.pop()
if six.text_type(default_urn) not in _URN_TO_CRS:
raise ValueError('Unknown mapping from SRS/CRS_URN {!r} to '
'cartopy projection.'.format(default_urn))
self._default_urn = default_urn
return _URN_TO_CRS[six.text_type(self._default_urn)]
def fetch_geometries(self, projection, extent):
"""
Return any Point, Linestring or LinearRing geometries available
from the WFS that lie within the specified extent.
Parameters
----------
projection: :class:`cartopy.crs.Projection`
The projection in which the extent is specified and in
which the geometries should be returned. Only the default
(native) projection is supported.
extent: four element tuple
(min_x, max_x, min_y, max_y) tuple defining the geographic extent
of the geometries to obtain.
Returns
-------
geoms
A list of Shapely geometries.
"""
if self.default_projection() != projection:
raise ValueError('Geometries are only available in projection '
'{!r}.'.format(self.default_projection()))
min_x, max_x, min_y, max_y = extent
response = self.service.getfeature(typename=self.features,
bbox=(min_x, min_y, max_x, max_y),
**self.getfeature_extra_kwargs)
geoms_by_srs = self._to_shapely_geoms(response)
if not geoms_by_srs:
geoms = []
elif len(geoms_by_srs) > 1:
raise ValueError('Unexpected response from the WFS server. The '
'geometries are in multiple SRSs, when only one '
'was expected.')
else:
srs, geoms = list(geoms_by_srs.items())[0]
# Attempt to verify the SRS associated with the geometries (if any)
# matches the specified projection.
if srs is not None:
if srs in _URN_TO_CRS:
geom_proj = _URN_TO_CRS[srs]
if geom_proj != projection:
raise ValueError('The geometries are not in expected '
'projection. Expected {!r}, got '
'{!r}.'.format(projection, geom_proj))
else:
msg = 'Unable to verify matching projections due ' \
'to incomplete mappings from SRS identifiers ' \
'to cartopy projections. The geometries have ' \
'an SRS of {!r}.'.format(srs)
warnings.warn(msg)
return geoms
def _to_shapely_geoms(self, response):
"""
Convert polygon coordinate strings in WFS response XML to Shapely
geometries.
Parameters
----------
response: (file-like object)
WFS response XML data.
Returns
-------
geoms_by_srs
A dictionary containing geometries, with key-value pairs of
the form {srsname: [geoms]}.
"""
linear_rings_data = []
linestrings_data = []
points_data = []
tree = ElementTree.parse(response)
for node in tree.findall('.//{}msGeometry'.format(_MAP_SERVER_NS)):
# Find LinearRing geometries in our msGeometry node.
find_str = './/{gml}LinearRing'.format(gml=_GML_NS)
if self._node_has_child(node, find_str):
data = self._find_polygon_coords(node, find_str)
linear_rings_data.extend(data)
# Find LineString geometries in our msGeometry node.
find_str = './/{gml}LineString'.format(gml=_GML_NS)
if self._node_has_child(node, find_str):
data = self._find_polygon_coords(node, find_str)
linestrings_data.extend(data)
# Find Point geometries in our msGeometry node.
find_str = './/{gml}Point'.format(gml=_GML_NS)
if self._node_has_child(node, find_str):
data = self._find_polygon_coords(node, find_str)
points_data.extend(data)
geoms_by_srs = {}
for srs, x, y in linear_rings_data:
geoms_by_srs.setdefault(srs, []).append(
sgeom.LinearRing(zip(x, y)))
for srs, x, y in linestrings_data:
geoms_by_srs.setdefault(srs, []).append(
sgeom.LineString(zip(x, y)))
for srs, x, y in points_data:
geoms_by_srs.setdefault(srs, []).append(
sgeom.Point(zip(x, y)))
return geoms_by_srs
def _find_polygon_coords(self, node, find_str):
"""
Return the x, y coordinate values for all the geometries in
a given `node`.
Parameters
----------
node: :class:`xml.etree.ElementTree.Element`
Node of the parsed XML response.
find_str: string
A search string used to match subelements that contain
the coordinates of interest, for example:
'.//{http://www.opengis.net/gml}LineString'
Returns
-------
data
A list of (srsName, x_vals, y_vals) tuples.
"""
data = []
for polygon in node.findall(find_str):
feature_srs = polygon.attrib.get('srsName')
x, y = [], []
# We can have nodes called `coordinates` or `coord`.
coordinates_find_str = '{}coordinates'.format(_GML_NS)
coords_find_str = '{}coord'.format(_GML_NS)
if self._node_has_child(polygon, coordinates_find_str):
points = polygon.findtext(coordinates_find_str)
coords = points.strip().split(' ')
for coord in coords:
x_val, y_val = coord.split(',')
x.append(float(x_val))
y.append(float(y_val))
elif self._node_has_child(polygon, coords_find_str):
for coord in polygon.findall(coords_find_str):
x.append(float(coord.findtext('{}X'.format(_GML_NS))))
y.append(float(coord.findtext('{}Y'.format(_GML_NS))))
else:
raise ValueError('Unable to find or parse coordinate values '
'from the XML.')
data.append((feature_srs, x, y))
return data
@staticmethod
def _node_has_child(node, find_str):
"""
Return whether `node` contains (at any sub-level), a node with name
equal to `find_str`.
"""
element = node.find(find_str)
return element is not None
| lgpl-3.0 |
ywcui1990/nupic.research | projects/sequence_classification/run_sequence_classifcation_experiment.py | 11 | 21901 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import pickle
import time
import matplotlib.pyplot as plt
import multiprocessing
from util_functions import *
from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder
plt.ion()
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams.update({'figure.autolayout': True})
def rdse_encoder_nearest_neighbor(trainData, trainLabel, unknownSequence, encoder):
overlapSum = np.zeros((trainData.shape[0],))
for i in range(trainData.shape[0]):
overlapI = np.zeros((len(unknownSequence),))
for t in range(len(unknownSequence)):
overlapI[t] = np.sum(np.logical_and(encoder.encode(unknownSequence[t]),
encoder.encode(trainData[i, t])))
overlapSum[i] = np.sum(overlapI)
predictedClass = trainLabel[np.argmax(overlapSum)]
return predictedClass
def constructDistanceMat(distMatColumn, distMatCell, trainLabel, wOpt, bOpt):
numTest, numTrain = distMatColumn.shape
classList = np.unique(trainLabel).tolist()
distanceMat = np.zeros((numTest, numTrain))
for classI in classList:
classIidx = np.where(trainLabel == classI)[0]
distanceMat[:, classIidx] = \
(1 - wOpt[classI]) * distMatColumn[:, classIidx] + \
wOpt[classI] * distMatCell[:, classIidx] + bOpt[classI]
return distanceMat
def runTMOnSequence(tm, activeColumns, unionLength=1):
numCells = tm.getCellsPerColumn() * tm.getColumnDimensions()[0]
activeCellsTrace = []
predictiveCellsTrace = []
predictedActiveCellsTrace = []
activeColumnTrace = []
activationFrequency = np.zeros((numCells,))
predictedActiveFrequency = np.zeros((numCells,))
unionStepInBatch = 0
unionBatchIdx = 0
unionCells = set()
unionCols = set()
tm.reset()
for t in range(len(activeColumns)):
tm.compute(activeColumns[t], learn=False)
activeCellsTrace.append(set(tm.getActiveCells()))
predictiveCellsTrace.append(set(tm.getPredictiveCells()))
if t == 0:
predictedActiveCells = set()
else:
predictedActiveCells = activeCellsTrace[t].intersection(
predictiveCellsTrace[t - 1])
activationFrequency[tm.getActiveCells()] += 1
predictedActiveFrequency[list(predictedActiveCells)] += 1
unionCells = unionCells.union(predictedActiveCells)
unionCols = unionCols.union(activeColumns[t])
unionStepInBatch += 1
if unionStepInBatch == unionLength:
predictedActiveCellsTrace.append(unionCells)
activeColumnTrace.append(unionCols)
unionStepInBatch = 0
unionBatchIdx += 1
unionCells = set()
unionCols = set()
if unionStepInBatch > 0:
predictedActiveCellsTrace.append(unionCells)
activeColumnTrace.append(unionCols)
activationFrequency = activationFrequency / np.sum(activationFrequency)
predictedActiveFrequency = predictedActiveFrequency / np.sum(
predictedActiveFrequency)
return (activeColumnTrace,
predictedActiveCellsTrace,
activationFrequency,
predictedActiveFrequency)
def runTMOverDatasetFast(tm, activeColumns, unionLength=1):
"""
Run the TM over a dataset of encoder outputs, saving union traces of active
columns and predicted-active cells.
:param tm: TemporalMemory instance
:param activeColumns: list (one entry per sequence) of per-timestep sets of
    active columns produced by the encoder
:param unionLength: number of time steps pooled into each union window
:return: (activeColumnUnionTrace, predictedActiveCellsUnionTrace,
    activationFrequencyTrace, predictedActiveFrequencyTrace)
"""
numSequence = len(activeColumns)
predictedActiveCellsUnionTrace = []
activationFrequencyTrace = []
predictedActiveFrequencyTrace = []
activeColumnUnionTrace = []
for i in range(numSequence):
(activeColumnTrace,
predictedActiveCellsTrace,
activationFrequency,
predictedActiveFrequency) = runTMOnSequence(tm, activeColumns[i], unionLength)
predictedActiveCellsUnionTrace.append(predictedActiveCellsTrace)
activeColumnUnionTrace.append(activeColumnTrace)
activationFrequencyTrace.append(activationFrequency)
predictedActiveFrequencyTrace.append(predictedActiveFrequency)
# print "{} out of {} done ".format(i, numSequence)
return (activeColumnUnionTrace,
predictedActiveCellsUnionTrace,
activationFrequencyTrace,
predictedActiveFrequencyTrace)
def runEncoderOverDataset(encoder, dataset):
activeColumnsData = []
for i in range(dataset.shape[0]):
activeColumnsTrace = []
for element in dataset[i, :]:
encoderOutput = encoder.encode(element)
activeColumns = set(np.where(encoderOutput > 0)[0])
activeColumnsTrace.append(activeColumns)
activeColumnsData.append(activeColumnsTrace)
# print "{} out of {} done ".format(i, dataset.shape[0])
return activeColumnsData
def calcualteEncoderModelWorker(taskQueue, resultQueue, *args):
while True:
nextTask = taskQueue.get()
print "Next task is : ", nextTask
if nextTask is None:
break
nBuckets = nextTask["nBuckets"]
accuracyColumnOnly = calculateEncoderModelAccuracy(nBuckets, *args)
resultQueue.put({nBuckets: accuracyColumnOnly})
print "Column Only model, Resolution: {} Accuracy: {}".format(
nBuckets, accuracyColumnOnly)
return
def calculateEncoderModelAccuracy(nBuckets, numCols, w, trainData, trainLabel):
maxValue = np.max(trainData)
minValue = np.min(trainData)
resolution = (maxValue - minValue) / nBuckets
encoder = RandomDistributedScalarEncoder(resolution, w=w, n=numCols)
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
distMatColumnTrain = calculateDistanceMatTrain(activeColumnsTrain)
meanAccuracy, outcomeColumn = calculateAccuracy(distMatColumnTrain,
trainLabel, trainLabel)
accuracyColumnOnly = np.mean(outcomeColumn)
return accuracyColumnOnly
def searchForOptimalEncoderResolution(nBucketList, trainData, trainLabel, numCols, w):
numCPU = multiprocessing.cpu_count()
# Establish communication queues
taskQueue = multiprocessing.JoinableQueue()
resultQueue = multiprocessing.Queue()
for nBuckets in nBucketList:
taskQueue.put({"nBuckets": nBuckets})
for _ in range(numCPU):
taskQueue.put(None)
jobs = []
for i in range(numCPU):
print "Start process ", i
p = multiprocessing.Process(target=calcualteEncoderModelWorker,
args=(taskQueue, resultQueue, numCols, w, trainData, trainLabel))
jobs.append(p)
p.daemon = True
p.start()
# p.join()
# taskQueue.join()
while not taskQueue.empty():
time.sleep(0.1)
accuracyVsResolution = np.zeros((len(nBucketList),))
while not resultQueue.empty():
exptResult = resultQueue.get()
nBuckets = exptResult.keys()[0]
accuracyVsResolution[nBucketList.index(nBuckets)] = exptResult[nBuckets]
return accuracyVsResolution
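# Illustrative call (values mirror those used in __main__ below):
#   nBucketList = range(20, 200, 10)
#   accuracyVsResolution = searchForOptimalEncoderResolution(
#       nBucketList, trainData, trainLabel, numCols=2048, w=41)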
if __name__ == "__main__":
plt.close('all')
datasetName = "SyntheticData"
dataSetList = listDataSets(datasetName)
# datasetName = 'UCR_TS_Archive_2015'
# dataSetList = listDataSets(datasetName)
# dataSetList = ["synthetic_control"]
skipTMmodel = False
for dataName in dataSetList:
trainData, trainLabel, testData, testLabel = loadDataset(
dataName, datasetName)
numTest = len(testLabel)
numTrain = len(trainLabel)
sequenceLength = len(trainData[0])
classList = np.unique(trainLabel).tolist()
numClass = len(classList)
print "Processing {}".format(dataName)
print "Train Sample # {}, Test Sample # {}".format(numTrain, numTest)
print "Sequence Length {} Class # {}".format(sequenceLength, len(classList))
EuclideanDistanceMat = calculateEuclideanDistanceMat(testData, trainData)
outcomeEuclidean = calculateEuclideanModelAccuracy(trainData, trainLabel,
testData, testLabel)
accuracyEuclideanDist = np.mean(outcomeEuclidean)
print
print "Euclidean model accuracy: {}".format(accuracyEuclideanDist)
print
# # Use SDR overlap instead of Euclidean distance
print "Running Encoder model"
maxValue = np.max(trainData)
minValue = np.min(trainData)
numCols = 2048
w = 41
try:
searchResolution = pickle.load(
open('results/optimalEncoderResolution/{}'.format(dataName), 'r'))
nBucketList = searchResolution['nBucketList']
accuracyVsResolution = searchResolution['accuracyVsResolution']
optNumBucket = nBucketList[smoothArgMax(np.array(accuracyVsResolution))]
optimalResolution = (maxValue - minValue)/optNumBucket
except:
nBucketList = range(20, 200, 10)
accuracyVsResolution = searchForOptimalEncoderResolution(
nBucketList, trainData, trainLabel, numCols, w)
optNumBucket = nBucketList[np.argmax(np.array(accuracyVsResolution))]
optimalResolution = (maxValue - minValue)/optNumBucket
searchResolution = {
'nBucketList': nBucketList,
'accuracyVsResolution': accuracyVsResolution,
'optimalResolution': optimalResolution
}
# save optimal resolution for future use
outputFile = open('results/optimalEncoderResolution/{}'.format(dataName), 'w')
pickle.dump(searchResolution, outputFile)
outputFile.close()
print "optimal bucket # {}".format((maxValue - minValue)/optimalResolution)
encoder = RandomDistributedScalarEncoder(optimalResolution, w=w, n=numCols)
print "encoding train data ..."
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
print "encoding test data ..."
activeColumnsTest = runEncoderOverDataset(encoder, testData)
print "calculate column distance matrix ..."
distMatColumnTest = calculateDistanceMat(activeColumnsTest, activeColumnsTrain)
testAccuracyColumnOnly, outcomeColumn = calculateAccuracy(
distMatColumnTest, trainLabel, testLabel)
print
print "Column Only model, Accuracy: {}".format(testAccuracyColumnOnly)
expResults = {'accuracyEuclideanDist': accuracyEuclideanDist,
'accuracyColumnOnly': testAccuracyColumnOnly,
'EuclideanDistanceMat': EuclideanDistanceMat,
'distMatColumnTest': distMatColumnTest}
outputFile = open('results/modelPerformance/{}_columnOnly'.format(dataName), 'w')
pickle.dump(expResults, outputFile)
outputFile.close()
if skipTMmodel:
continue
# Train TM
from nupic.bindings.algorithms import TemporalMemory as TemporalMemoryCPP
tm = TemporalMemoryCPP(columnDimensions=(numCols, ),
cellsPerColumn=32,
permanenceIncrement=0.1,
permanenceDecrement=0.1,
predictedSegmentDecrement=0.01,
minThreshold=10,
activationThreshold=15,
maxNewSynapseCount=20)
print
print "Training TM on sequences ... "
numRepeatsBatch = 1
numRptsPerSequence = 1
np.random.seed(10)
for rpt in xrange(numRepeatsBatch):
# randomize the order of training sequences
randomIdx = np.random.permutation(range(numTrain))
for i in range(numTrain):
for _ in xrange(numRptsPerSequence):
for t in range(sequenceLength):
tm.compute(activeColumnsTrain[randomIdx[i]][t], learn=True)
tm.reset()
print "Rpt: {}, {} out of {} done ".format(rpt, i, trainData.shape[0])
# run TM over training data
unionLength = 20
print "Running TM on Training Data with union window {}".format(unionLength)
(activeColTrain,
activeCellsTrain,
activeFreqTrain,
predActiveFreqTrain) = runTMOverDatasetFast(tm, activeColumnsTrain, unionLength)
# construct two distance matrices using training data
distMatColumnTrain = calculateDistanceMat(activeColTrain, activeColTrain)
distMatCellTrain = calculateDistanceMat(activeCellsTrain, activeCellsTrain)
distMatActiveFreqTrain = calculateDistanceMat(activeFreqTrain, activeFreqTrain)
distMatPredActiveFreqTrain = calculateDistanceMat(predActiveFreqTrain, predActiveFreqTrain)
maxColumnOverlap = np.max(distMatColumnTrain)
maxCellOverlap = np.max(distMatCellTrain)
distMatColumnTrain /= maxColumnOverlap
distMatCellTrain /= maxCellOverlap
# set diagonal line to zeros
for i in range(trainData.shape[0]):
distMatColumnTrain[i, i] = 0
distMatCellTrain[i, i] = 0
print "Running TM on Testing Data ... "
(activeColTest,
activeCellsTest,
activeFreqTest,
predActiveFreqTest) = runTMOverDatasetFast(tm, activeColumnsTest, unionLength)
distMatColumnTest = calculateDistanceMat(activeColTest, activeColTrain)
distMatCellTest = calculateDistanceMat(activeCellsTest, activeCellsTrain)
distMatActiveFreqTest = calculateDistanceMat(activeFreqTest, activeFreqTrain)
distMatPredActiveFreqTest = calculateDistanceMat(predActiveFreqTest, predActiveFreqTrain)
distMatColumnTest /= maxColumnOverlap
distMatCellTest /= maxCellOverlap
expResultTM = {"distMatColumnTrain": distMatColumnTrain,
"distMatCellTrain": distMatCellTrain,
"distMatActiveFreqTrain": distMatActiveFreqTrain,
"distMatPredActiveFreqTrain": distMatPredActiveFreqTrain,
"distMatColumnTest": distMatColumnTest,
"distMatCellTest": distMatCellTest,
"distMatActiveFreqTest": distMatActiveFreqTest,
"distMatPredActiveFreqTest": distMatPredActiveFreqTest}
pickle.dump(expResultTM, open('results/distanceMat/{}_union_{}'.format(
dataName, unionLength), 'w'))
activeFreqResult = {"activeFreqTrain": activeFreqTrain,
"activeFreqTest": activeFreqTest,
"predActiveFreqTrain": predActiveFreqTrain,
"predActiveFreqTest": predActiveFreqTest}
pickle.dump(activeFreqResult, open('results/activeFreq/{}'.format(
dataName), 'w'))
# fit supervised model
from htmresearch.algorithms.sdr_classifier_batch import classificationNetwork
classIdxMapTrain = {}
classIdxMapTest = {}
for classIdx in classList:
classIdxMapTrain[classIdx] = np.where(trainLabel == classIdx)[0]
classIdxMapTest[classIdx] = np.where(testLabel == classIdx)[0]
options = {"useColumnRepresentation": False,
"useCellRepresentation": True}
classifierInputTrain = prepareClassifierInput(
distMatColumnTrain, distMatCellTrain, classIdxMapTrain, trainLabel, options)
classifierInputTest = prepareClassifierInput(
distMatColumnTest, distMatCellTest, classIdxMapTrain, trainLabel, options)
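# prepareClassifierInput presumably turns the distance matrices into per-class
# feature vectors using the class->index maps built above; with these options
# only the cell representation contributes to the features.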
numInputs = len(classifierInputTrain[0])
regularizationLambda = {"lambdaL2": [1],
"wIndice": [np.array(range(0, numInputs*numClass))]}
# regularizationLambda = None
cl = classificationNetwork(numInputs, numClass, regularizationLambda)
wInit = np.zeros((numInputs, numClass))
for classIdx in range(numClass):
wInit[classIdx, classIdx] = 1
wInit = np.reshape(wInit, (numInputs*numClass, ))
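# Start the optimizer from an identity-like weight matrix: each class's own
# distance feature gets weight 1 and all other weights start at 0, which is
# roughly equivalent to nearest-class scoring before any optimization.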
cl.optimize(classifierInputTrain, trainLabel, wInit)
trainAccuracy = cl.accuracy(classifierInputTrain, trainLabel)
testAccuracy = cl.accuracy(classifierInputTest, testLabel)
print "Train accuracy: {} test accuracy: {}".format(trainAccuracy,
testAccuracy)
# default to use column distance only
wOpt = {}
bOpt = {}
for classI in classList:
wOpt[classI] = 0
bOpt[classI] = 0
# estimate the optimal weight factors
wList = np.linspace(0, 1.0, 101)
accuracyVsWRpt = np.zeros((len(wList), ))
for i in range(len(wList)):
accuracyVsWRpt[i] = - costFuncSharedW(
wList[i], wOpt, bOpt, distMatColumnTrain, distMatCellTrain, trainLabel, classList)
bestWForClassI = wList[np.argmax(accuracyVsWRpt)]
for classI in classList:
wOpt[classI] = bestWForClassI
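# A single mixing weight between column distance and cell distance is chosen
# by a brute-force sweep over [0, 1] on the training data; the best value
# (with zero offsets) is shared by every class when building the combined
# test distance matrix below.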
combinedDistanceMat = constructDistanceMat(distMatColumnTest,
distMatCellTest,
trainLabel, wOpt, bOpt)
# testing
print "Column Only model, Accuracy: {}".format(testAccuracyColumnOnly)
testAccuracyActiveFreq, outcomeFreq = calculateAccuracy(
distMatActiveFreqTest, trainLabel, testLabel)
print "Active Freq Dist Accuracy {}".format(testAccuracyActiveFreq)
testAccuracyPredActiveFreq, outcomeFreq = calculateAccuracy(
distMatPredActiveFreqTest, trainLabel, testLabel)
print "Pred-Active Freq Dist Accuracy {}".format(testAccuracyPredActiveFreq)
testAccuracyCellOnly, outcomeCellOnly = calculateAccuracy(
distMatCellTest, trainLabel, testLabel)
print "Cell Dist accuracy {}".format(testAccuracyCellOnly)
testAccuracyCombined, outcomeTM = calculateAccuracy(
combinedDistanceMat, trainLabel, testLabel)
print "Mixed weight accuracy {}".format(testAccuracyCombined)
distMatColumnTestSort = sortDistanceMat(distMatColumnTest, trainLabel, testLabel)
distMatCellTestSort = sortDistanceMat(distMatCellTest, trainLabel,
testLabel)
distMatActiveFreqTestSort = sortDistanceMat(distMatActiveFreqTest, trainLabel,
testLabel)
distMatPredActiveFreqTestSort = sortDistanceMat(distMatPredActiveFreqTest,
trainLabel,
testLabel)
EuclideanDistanceMatSort = sortDistanceMat(EuclideanDistanceMat, trainLabel,
testLabel)
combinedDistanceMatSort = sortDistanceMat(combinedDistanceMat, trainLabel,
testLabel)
vLineLocs, hLineLocs = calculateClassLines(trainLabel, testLabel, classList)
fig, ax = plt.subplots(2, 3)
ax[0, 0].imshow(distMatColumnTestSort)
addClassLines(ax[0, 0], vLineLocs, hLineLocs)
ax[0, 0].set_title('Column Dist, {:2.2f}'.format(testAccuracyColumnOnly))
ax[0, 1].imshow(-EuclideanDistanceMatSort)
addClassLines(ax[0, 1], vLineLocs, hLineLocs)
ax[0, 1].set_title('- Euclidean Dist {:2.2f}'.format(accuracyEuclideanDist))
ax[1, 0].imshow(distMatCellTestSort)
addClassLines(ax[1, 0], vLineLocs, hLineLocs)
ax[1, 0].set_title('Cell Dist, {:2.2f}'.format(testAccuracyCellOnly))
ax[1, 1].imshow(distMatActiveFreqTestSort)
addClassLines(ax[1, 1], vLineLocs, hLineLocs)
ax[1, 1].set_title('Active Freq, {:2.2f}'.format(testAccuracyActiveFreq))
ax[1, 2].imshow(distMatPredActiveFreqTestSort)
addClassLines(ax[1, 2], vLineLocs, hLineLocs)
ax[1, 2].set_title('Pred-Active Freq, {:2.2f}'.format(testAccuracyPredActiveFreq))
ax[0, 2].imshow(combinedDistanceMatSort)
addClassLines(ax[0, 2], vLineLocs, hLineLocs)
ax[0, 2].set_title('Combined Dist {:2.2f}'.format(testAccuracyCombined))
plt.savefig('figures/{}_summary.pdf'.format(dataName))
# accuracy per class
accuracyTM = {}
accuracyEuclidean = {}
accuracyColumn = {}
accuracyCellOnly = {}
for classI in classList:
idx = np.where(testLabel == classI)[0]
accuracyTM[classI] = np.mean(np.array(outcomeTM)[idx])
accuracyEuclidean[classI] = np.mean(np.array(outcomeEuclidean)[idx])
accuracyColumn[classI] = np.mean(np.array(outcomeColumn)[idx])
accuracyCellOnly[classI] = np.mean(np.array(outcomeCellOnly)[idx])
fig, ax = plt.subplots()
width=0.5
ax.bar(0, accuracyEuclideanDist, width, color='c')
ax.bar(1, testAccuracyColumnOnly, width, color='g')
ax.bar(2, testAccuracyCellOnly, width, color='b')
ax.bar(3, testAccuracyCombined, width, color='r')
plt.xlim([0, 4])
plt.legend([ 'Euclidean', 'Column', 'Cell', 'Weighted Dist'], loc=3)
plt.savefig('figures/{}_performance_overall.pdf'.format(dataName))
fig, ax = plt.subplots()
width = 0.2
classIdx = np.array(accuracyTM.keys())
ax.bar(classIdx-width, accuracyTM.values(), width, color='r')
ax.bar(classIdx, accuracyColumn.values(), width, color='g')
ax.bar(classIdx+width, accuracyCellOnly.values(), width, color='b')
ax.bar(classIdx+2*width, accuracyEuclidean.values(), width, color='c')
plt.legend(['Weighted Dist', 'Column', 'Cell', 'Euclidean'], loc=3)
plt.ylabel('Accuracy')
plt.xlabel('Class #')
plt.ylim([0, 1.05])
plt.savefig('figures/{}_performance_by_class.pdf'.format(dataName))
expResults = {'accuracyEuclideanDist': accuracyEuclideanDist,
'accuracyColumnOnly': testAccuracyColumnOnly,
'accuracyTM': testAccuracyCombined,
'weights': wOpt,
'offsets': bOpt,
'EuclideanDistanceMat': EuclideanDistanceMat,
'activeColumnOverlap': distMatColumnTrain,
'activeCellOverlap': distMatCellTrain}
pickle.dump(expResults, open('results/modelPerformance/{}'.format(dataName), 'w'))
| agpl-3.0 |
terkkila/scikit-learn | sklearn/preprocessing/label.py | 35 | 28877 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belongs or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible types are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute ``indicator_matrix_`` is deprecated and will be "
"removed in 0.17. Use ``y_type_ == 'multilabel-indicator'`` "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute ``multilabel_`` is deprecated and will be removed "
"in 0.17. Use ``y_type_.startswith('multilabel')`` "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows one to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
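# Build the CSR matrix directly: y_in_classes marks which labels are known,
# its cumulative sum yields one stored entry (filled with pos_label) per
# recognised sample, and searchsorted maps each seen label to its column.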
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
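Examples
--------
A minimal round trip, mirroring the class-level example above:
>>> import numpy as np
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
       [0, 0, 1]])
>>> mlb.inverse_transform(np.array([[1, 1, 0], [0, 0, 1]]))
[(1, 2), (3,)]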
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
IndraVikas/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
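# el(xs, z) appears to solve the elastic-net unit-ball constraint for the
# positive w1 coordinate given w0 = x and mixing parameter z; note the
# (2 - 4*z) denominator, which is why alpha below is 0.501 rather than 0.5.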
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would divide by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
wallarelvo/malt | scripts/create_scene.py | 1 | 5265 |
import numpy as np
import matplotlib
import matplotlib.pylab
import math
import random
"""
DISCLAIMER: The crappy code below does not belong to me; I am simply
too lazy to modify it.
"""
def make_height_func(noise_func, noise_min, noise_max):
def height_func(i):
return noise_func(noise_min * 2 ** -i, noise_max * 2 ** -i)
return height_func
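# Noise at recursion depth i is drawn from a range scaled by 2**-i, so the
# displacement amplitude halves at every subdivision -- the usual roughness
# decay of the diamond-square algorithm.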
def make_space(width, height, height_func):
space = np.zeros((width, height))
corner = height_func(0)
space[0, 0] = corner
space[0, -1] = corner
space[-1, 0] = corner
space[-1, -1] = corner
return space
def avg(*args):
return sum(args) / len(args)
def distort_space(space, height_func):
x_max, y_max = space.shape
x_min = y_min = 0
x_max -= 1
y_max -= 1
side = x_max
squares = 1
i = 0
while side > 1:
for x in range(squares):
for y in range(squares):
#Locations
x_left = x * side
x_right = (x + 1) * side
y_top = y * side
y_bottom = (y + 1) * side
dx = side / 2
dy = side / 2
xm = x_left + dx
ym = y_top + dy
#Diamond step- create center avg for each square
space[xm, ym] = avg(
space[x_left, y_top],
space[x_left, y_bottom],
space[x_right, y_top],
space[x_right, y_bottom]
)
space[xm, ym] += height_func(i)
#Square step- create squares for each diamond
#Top Square
if (y_top - dy) < y_min:
temp = y_max - dy
else:
temp = y_top - dy
space[xm, y_top] = avg(
space[x_left, y_top],
space[x_right, y_top],
space[xm, ym],
space[xm, temp]
)
space[xm, y_top] += height_func(i)
#Top Wrapping
if y_top == y_min:
space[xm, y_max] = space[xm, y_top]
#Bottom Square
if (y_bottom + dy) > y_max:
temp = y_top + dy
else:
temp = y_bottom - dy
space[xm, y_bottom] = avg(space[x_left, y_bottom],
space[x_right, y_bottom],
space[xm, ym],
space[xm, temp])
space[xm, y_bottom] += height_func(i)
#Bottom Wrapping
if y_bottom == y_max:
space[xm, y_min] = space[xm, y_bottom]
#Left Square
if (x_left - dx) < x_min:
temp = x_max - dx
else:
temp = x_left - dx
space[x_left, ym] = avg(space[x_left, y_top],
space[x_left, y_bottom],
space[xm, ym],
space[temp, ym])
space[x_left, ym] += height_func(i)
#Left Wrapping
if x_left == x_min:
space[x_max, ym] = space[x_left, ym]
#Right Square
if (x_right + dx) > x_max:
temp = x_min + dx
else:
temp = x_right + dx
space[x_right, ym] = avg(space[x_right, y_top],
space[x_right, y_bottom],
space[xm, ym],
space[temp, ym])
space[x_right, ym] += height_func(i)
#Right Wrapping
if x_right == x_max:
space[x_min, ym] = space[x_right, ym]
#Refine the pass
side /= 2
squares *= 2
i += 1
def save_surface(space, path):
bottom = min(space.flat)
top = max(space.flat)
norm = matplotlib.colors.Normalize(bottom, top)
x, y = space.shape
matplotlib.pylab.clf()
fig = matplotlib.pylab.gcf()
ax = fig.gca()
ScalarMap = ax.pcolorfast(space, norm=norm)
fig.colorbar(ScalarMap)
ax.axis('image')
fig.savefig(path + ".png", dpi=100)
np.savetxt(path + ".out", space)
ax.cla()
def get_space(width, height):
width = 2 ** math.ceil(math.log(width, 2)) + 1
height = 2 ** math.ceil(math.log(height, 2)) + 1
noise_func = random.uniform
noise_min = -1.0
noise_max = 1.0
height_func = make_height_func(noise_func, noise_min, noise_max)
#Initialize the space with random values
space = make_space(width, height, height_func)
#Square-Diamond Method
distort_space(space, height_func)
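# Min-max rescale the accumulated heights to [0, 1] so callers always get a
# normalized field regardless of the noise range.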
min_val = space.min()
max_val = space.max()
space = (space - min_val) / (max_val - min_val)
print space.mean()
return space
#save_surface(space, path)
if __name__ == "__main__":
import sys
dim = int(sys.argv[1])
name = sys.argv[2]
space = get_space(dim, dim)
save_surface(space, name)
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/sparse/tests/test_indexing.py | 7 | 38977 | # pylint: disable-msg=E1101,W0612
import nose # noqa
import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseSeriesIndexing(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
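# General pattern of these tests: index the SparseSeries, index the dense
# original the same way, convert that result back with to_sparse(), and
# require both to be equal.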
def test_getitem(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse[0], 1)
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[3], 3)
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
self.assertEqual(res.dtype, np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
self.assertEqual(res.dtype, np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse[0], 1)
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[2], 0)
self.assertEqual(sparse[3], 3)
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2],
orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2],
orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2],
orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:],
orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.loc[0], 1)
self.assertTrue(np.isnan(sparse.loc[1]))
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
self.assertTrue(np.isnan(result[-1]))
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list('ABCDE'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc['A'], 1)
self.assertTrue(np.isnan(sparse.loc['B']))
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.loc['A'], 1)
self.assertTrue(np.isnan(sparse.loc['B']))
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc['C':],
orig.loc['C':].to_sparse(fill_value=0))
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:],
orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.iloc[3], 3)
self.assertTrue(np.isnan(sparse.iloc[2]))
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with tm.assertRaises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.iloc[3], 3)
self.assertTrue(np.isnan(sparse.iloc[1]))
self.assertEqual(sparse.iloc[4], 0)
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.iloc[2:],
orig.iloc[2:].to_sparse(fill_value=0))
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
self.assertEqual(sparse.at[0], orig.at[0])
self.assertTrue(np.isnan(sparse.at[1]))
self.assertTrue(np.isnan(sparse.at[2]))
self.assertEqual(sparse.at[3], orig.at[3])
self.assertTrue(np.isnan(sparse.at[4]))
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
self.assertEqual(sparse.at['a'], orig.at['a'])
self.assertTrue(np.isnan(sparse.at['b']))
self.assertTrue(np.isnan(sparse.at['c']))
self.assertEqual(sparse.at['d'], orig.at['d'])
self.assertTrue(np.isnan(sparse.at['e']))
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.at['a'], orig.at['a'])
self.assertTrue(np.isnan(sparse.at['b']))
self.assertEqual(sparse.at['c'], orig.at['c'])
self.assertEqual(sparse.at['d'], orig.at['d'])
self.assertEqual(sparse.at['e'], orig.at['e'])
def test_iat(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.iat[0], orig.iat[0])
self.assertTrue(np.isnan(sparse.iat[1]))
self.assertTrue(np.isnan(sparse.iat[2]))
self.assertEqual(sparse.iat[3], orig.iat[3])
self.assertTrue(np.isnan(sparse.iat[4]))
self.assertTrue(np.isnan(sparse.iat[-1]))
self.assertEqual(sparse.iat[-5], orig.iat[-5])
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
self.assertEqual(sparse.iat[0], orig.iat[0])
self.assertTrue(np.isnan(sparse.iat[1]))
self.assertEqual(sparse.iat[2], orig.iat[2])
self.assertEqual(sparse.iat[3], orig.iat[3])
self.assertEqual(sparse.iat[4], orig.iat[4])
self.assertEqual(sparse.iat[-1], orig.iat[-1])
self.assertEqual(sparse.iat[-5], orig.iat[-5])
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
self.assertEqual(s.get(0), 1)
self.assertTrue(np.isnan(s.get(1)))
self.assertIsNone(s.get(5))
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
self.assertEqual(s.get('A'), 1)
self.assertTrue(np.isnan(s.get('B')))
self.assertEqual(s.get('C'), 0)
self.assertIsNone(s.get('XX'))
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
self.assertEqual(s.get('A'), 1)
self.assertTrue(np.isnan(s.get('B')))
self.assertEqual(s.get('C'), 0)
self.assertIsNone(s.get('XX'))
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_series_equal(sparse.take([0, 1, 3]),
orig.take([0, 1, 3]).to_sparse())
tm.assert_sp_series_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse(fill_value=0))
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(['B', 'E', 'C'])
exp = orig.reindex(['B', 'E', 'C']).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(['A', 'B', 'C'])
exp = orig.reindex(['A', 'B', 'C']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0., 0., 0., 0., 0.],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def tests_indexing_with_sparse(self):
# GH 13985
for kind in ['integer', 'block']:
for fill in [True, False, np.nan]:
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True], fill_value=fill,
dtype=bool)
tm.assert_sp_array_equal(pd.SparseArray([1, 3], kind=kind),
arr[indexer])
s = pd.SparseSeries(arr, index=['a', 'b', 'c'],
dtype=np.float64)
exp = pd.SparseSeries([1, 3], index=['a', 'c'],
dtype=np.float64, kind=kind)
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
tm.assert_sp_series_equal(s.iloc[indexer], exp)
indexer = pd.SparseSeries(indexer, index=['a', 'b', 'c'])
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
msg = ("iLocation based boolean indexing cannot use an "
"indexable as a mask")
with tm.assertRaisesRegexp(ValueError, msg):
s.iloc[indexer]
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
_multiprocess_can_split_ = True
def setUp(self):
# MultiIndex with duplicated values
idx = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 0),
('C', 0), ('C', 1)])
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
self.sparse = self.orig.to_sparse()
def test_getitem_multi(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse[0], orig[0])
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[3], orig[3])
tm.assert_sp_series_equal(sparse['A'], orig['A'].to_sparse())
tm.assert_sp_series_equal(sparse['B'], orig['B'].to_sparse())
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_multi_tuple(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse['C', 0], orig['C', 0])
self.assertTrue(np.isnan(sparse['A', 1]))
self.assertTrue(np.isnan(sparse['B', 0]))
def test_getitems_slice_multi(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[2:], orig[2:].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['A':'B'],
orig.loc['A':'B'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())
def test_loc(self):
# need to be override to use different label
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc['A'],
orig.loc['A'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B'],
orig.loc['B'].to_sparse())
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_multi_tuple(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.loc['C', 0], orig.loc['C', 0])
self.assertTrue(np.isnan(sparse.loc['A', 1]))
self.assertTrue(np.isnan(sparse.loc['B', 0]))
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc['A':], orig.loc['A':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['A':'B'],
orig.loc['A':'B'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())
class TestSparseDataFrameIndexing(tm.TestCase):
_multiprocess_can_split_ = True
def test_getitem(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse['x'], orig['x'].to_sparse())
tm.assert_sp_frame_equal(sparse[['x']], orig[['x']].to_sparse())
tm.assert_sp_frame_equal(sparse[['z', 'x']],
orig[['z', 'x']].to_sparse())
tm.assert_sp_frame_equal(sparse[[True, False, True, True]],
orig[[True, False, True, True]].to_sparse())
tm.assert_sp_frame_equal(sparse[[1, 2]],
orig[[1, 2]].to_sparse())
def test_getitem_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse['y'],
orig['y'].to_sparse(fill_value=0))
exp = orig[['x']].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[['x']], exp)
exp = orig[['z', 'x']].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[['z', 'x']], exp)
indexer = [True, False, True, True]
exp = orig[indexer].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[indexer], exp)
exp = orig[[1, 2]].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[[1, 2]], exp)
def test_loc(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc[0, 'x'], 1)
self.assertTrue(np.isnan(sparse.loc[1, 'z']))
self.assertEqual(sparse.loc[2, 'z'], 4)
tm.assert_sp_series_equal(sparse.loc[0], orig.loc[0].to_sparse())
tm.assert_sp_series_equal(sparse.loc[1], orig.loc[1].to_sparse())
tm.assert_sp_series_equal(sparse.loc[2, :],
orig.loc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[2, :],
orig.loc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'y'],
orig.loc[:, 'y'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'y'],
orig.loc[:, 'y'].to_sparse())
result = sparse.loc[[1, 2]]
exp = orig.loc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[1, 2], :]
exp = orig.loc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ['x', 'z']]
exp = orig.loc[:, ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[0, 2], ['x', 'z']]
exp = orig.loc[[0, 2], ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_index(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
index=list('abc'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc['a', 'x'], 1)
self.assertTrue(np.isnan(sparse.loc['b', 'z']))
self.assertEqual(sparse.loc['c', 'z'], 4)
tm.assert_sp_series_equal(sparse.loc['a'], orig.loc['a'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b'], orig.loc['b'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b', :],
orig.loc['b', :].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b', :],
orig.loc['b', :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'z'],
orig.loc[:, 'z'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'z'],
orig.loc[:, 'z'].to_sparse())
result = sparse.loc[['a', 'b']]
exp = orig.loc[['a', 'b']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[['a', 'b'], :]
exp = orig.loc[['a', 'b'], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ['x', 'z']]
exp = orig.loc[:, ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[['c', 'a'], ['x', 'z']]
exp = orig.loc[['c', 'a'], ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_slice(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_iloc(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]])
sparse = orig.to_sparse()
self.assertEqual(sparse.iloc[1, 1], 3)
self.assertTrue(np.isnan(sparse.iloc[2, 0]))
tm.assert_sp_series_equal(sparse.iloc[0], orig.loc[0].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[1], orig.loc[1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[2, :],
orig.iloc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[2, :],
orig.iloc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1],
orig.iloc[:, 1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1],
orig.iloc[:, 1].to_sparse())
result = sparse.iloc[[1, 2]]
exp = orig.iloc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[1, 2], :]
exp = orig.iloc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[:, [1, 0]]
exp = orig.iloc[:, [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[2], [1, 0]]
exp = orig.iloc[[2], [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
with tm.assertRaises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_slice(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_at(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.at['A', 'x'], orig.at['A', 'x'])
self.assertTrue(np.isnan(sparse.at['B', 'z']))
self.assertTrue(np.isnan(sparse.at['C', 'y']))
self.assertEqual(sparse.at['D', 'x'], orig.at['D', 'x'])
def test_at_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.at['A', 'x'], orig.at['A', 'x'])
self.assertTrue(np.isnan(sparse.at['B', 'z']))
self.assertTrue(np.isnan(sparse.at['C', 'y']))
self.assertEqual(sparse.at['D', 'x'], orig.at['D', 'x'])
def test_iat(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.iat[0, 0], orig.iat[0, 0])
self.assertTrue(np.isnan(sparse.iat[1, 2]))
self.assertTrue(np.isnan(sparse.iat[2, 1]))
self.assertEqual(sparse.iat[2, 0], orig.iat[2, 0])
self.assertTrue(np.isnan(sparse.iat[-1, -2]))
self.assertEqual(sparse.iat[-1, -1], orig.iat[-1, -1])
def test_iat_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.iat[0, 0], orig.iat[0, 0])
self.assertTrue(np.isnan(sparse.iat[1, 2]))
self.assertTrue(np.isnan(sparse.iat[2, 1]))
self.assertEqual(sparse.iat[2, 0], orig.iat[2, 0])
self.assertTrue(np.isnan(sparse.iat[-1, -2]))
self.assertEqual(sparse.iat[-1, -1], orig.iat[-1, -1])
def test_take(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([0, 1]),
orig.take([0, 1]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
exp = orig.take([0]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0]), exp)
exp = orig.take([0, 1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0, 1]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse()
tm.assert_sp_frame_equal(res, exp)
orig = pd.DataFrame([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse()
tm.assert_sp_frame_equal(res, exp)
def test_reindex_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all missing
orig = pd.DataFrame([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all fill_value
orig = pd.DataFrame([[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
class TestMultitype(tm.TestCase):
def setUp(self):
self.cols = ['string', 'int', 'float', 'object']
self.string_series = pd.SparseSeries(['a', 'b', 'c'])
self.int_series = pd.SparseSeries([1, 2, 3])
self.float_series = pd.SparseSeries([1.1, 1.2, 1.3])
self.object_series = pd.SparseSeries([[], {}, set()])
self.sdf = pd.SparseDataFrame({
'string': self.string_series,
'int': self.int_series,
'float': self.float_series,
'object': self.object_series,
})
self.sdf = self.sdf[self.cols]
self.ss = pd.SparseSeries(['a', 1, 1.1, []], index=self.cols)
def test_frame_basic_dtypes(self):
for _, row in self.sdf.iterrows():
self.assertEqual(row.dtype, object)
tm.assert_sp_series_equal(self.sdf['string'], self.string_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['int'], self.int_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['float'], self.float_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['object'], self.object_series,
check_names=False)
def test_frame_indexing_single(self):
tm.assert_sp_series_equal(self.sdf.iloc[0],
pd.SparseSeries(['a', 1, 1.1, []],
index=self.cols),
check_names=False)
tm.assert_sp_series_equal(self.sdf.iloc[1],
pd.SparseSeries(['b', 2, 1.2, {}],
index=self.cols),
check_names=False)
tm.assert_sp_series_equal(self.sdf.iloc[2],
pd.SparseSeries(['c', 3, 1.3, set()],
index=self.cols),
check_names=False)
def test_frame_indexing_multiple(self):
tm.assert_sp_frame_equal(self.sdf, self.sdf[:])
tm.assert_sp_frame_equal(self.sdf, self.sdf.loc[:])
tm.assert_sp_frame_equal(self.sdf.iloc[[1, 2]],
pd.SparseDataFrame({
'string': self.string_series.iloc[[1, 2]],
'int': self.int_series.iloc[[1, 2]],
'float': self.float_series.iloc[[1, 2]],
'object': self.object_series.iloc[[1, 2]]
}, index=[1, 2])[self.cols])
tm.assert_sp_frame_equal(self.sdf[['int', 'string']],
pd.SparseDataFrame({
'int': self.int_series,
'string': self.string_series,
}))
def test_series_indexing_single(self):
for i, idx in enumerate(self.cols):
self.assertEqual(self.ss.iloc[i], self.ss[idx])
self.assertEqual(type(self.ss.iloc[i]),
type(self.ss[idx]))
self.assertEqual(self.ss['string'], 'a')
self.assertEqual(self.ss['int'], 1)
self.assertEqual(self.ss['float'], 1.1)
self.assertEqual(self.ss['object'], [])
def test_series_indexing_multiple(self):
tm.assert_sp_series_equal(self.ss.loc[['string', 'int']],
pd.SparseSeries(['a', 1],
index=['string', 'int']))
tm.assert_sp_series_equal(self.ss.loc[['string', 'object']],
pd.SparseSeries(['a', []],
index=['string', 'object']))
| gpl-3.0 |
stevenzhang18/Indeed-Flask | lib/pandas/core/base.py | 9 | 20057 | """
Base and utility classes for pandas objects.
"""
from pandas import compat
import numpy as np
from pandas.core import common as com
import pandas.core.nanops as nanops
import pandas.lib as lib
from pandas.util.decorators import Appender, cache_readonly, deprecate_kwarg
from pandas.core.common import AbstractMethodError
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
duplicated='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
    # object needs it
#----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
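# A minimal usage sketch (the Greeting class is hypothetical, for illustration
# only): a subclass only needs to implement __unicode__ and inherits consistent
# str/bytes/repr behaviour on both py2 and py3.
#
#     class Greeting(StringMixin):
#         def __unicode__(self):
#             return u"hello"
#
#     g = Greeting()
#     str(g)     # 'hello' on py2 and py3
#     repr(g)    # 'hello'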
class PandasObject(StringMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _dir_additions(self):
""" add addtional __dir__ for this object """
return set()
def _dir_deletions(self):
""" delete unwanted __dir__ for this object """
return set()
def __dir__(self):
"""
Provide method name lookup and completion
Only provide 'public' methods
"""
rv = set(dir(type(self)))
rv = (rv - self._dir_deletions()) | self._dir_additions()
return sorted(rv)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a call to
    `self.__freeze()`. Mainly used to prevent the user from setting wrong attributes
    on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
        # dict lookup instead of getattr, as getattr/hasattr would be False for a getter that raises
if getattr(self, "__frozen", False) and not (key in type(self).__dict__ or key == "_cache"):
raise AttributeError( "You cannot add any new attribute '{key}'".format(key=key))
object.__setattr__(self, key, value)
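# A minimal usage sketch (the Locked class is hypothetical): once _freeze() has
# been called, plain attribute assignment raises AttributeError, while
# object.__setattr__ remains the documented escape hatch.
#
#     class Locked(NoNewAttributesMixin):
#         pass
#
#     obj = Locked()
#     obj._freeze()
#     obj.foo = 1                          # raises AttributeError
#     object.__setattr__(obj, "foo", 1)    # still allowed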
class PandasDelegate(PandasObject):
""" an abstract base class for delegating methods/properties """
def _delegate_property_get(self, name, *args, **kwargs):
raise TypeError("You cannot access the property {name}".format(name=name))
def _delegate_property_set(self, name, value, *args, **kwargs):
raise TypeError("The property {name} cannot be set".format(name=name))
def _delegate_method(self, name, *args, **kwargs):
raise TypeError("You cannot call method {name}".format(name=name))
@classmethod
def _add_delegate_accessors(cls, delegate, accessors, typ, overwrite=False):
"""
add accessors to cls from the delegate class
Parameters
----------
cls : the class to add the methods/properties to
delegate : the class to get methods/properties & doc-strings
        accessors : string list of accessors to add
typ : 'property' or 'method'
overwrite : boolean, default False
overwrite the method/property in the target class if it exists
"""
def _create_delegator_property(name):
def _getter(self):
return self._delegate_property_get(name)
def _setter(self, new_values):
return self._delegate_property_set(name, new_values)
_getter.__name__ = name
_setter.__name__ = name
return property(fget=_getter, fset=_setter, doc=getattr(delegate,name).__doc__)
def _create_delegator_method(name):
def f(self, *args, **kwargs):
return self._delegate_method(name, *args, **kwargs)
f.__name__ = name
f.__doc__ = getattr(delegate,name).__doc__
return f
for name in accessors:
if typ == 'property':
f = _create_delegator_property(name)
else:
f = _create_delegator_method(name)
# don't overwrite existing methods/properties
if overwrite or not hasattr(cls, name):
setattr(cls,name,f)
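# A hedged sketch of how _add_delegate_accessors is typically wired up; the
# MyAccessor and MyDelegate names are illustrative placeholders, not pandas API.
#
#     class MyAccessor(PandasDelegate):
#         def __init__(self, values):
#             self._values = values
#
#         def _delegate_method(self, name, *args, **kwargs):
#             return getattr(self._values, name)(*args, **kwargs)
#
#     MyAccessor._add_delegate_accessors(delegate=MyDelegate,
#                                        accessors=["some_method"],
#                                        typ="method")
#     # MyAccessor(values).some_method(...) now forwards to the delegate.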
class AccessorProperty(object):
"""Descriptor for implementing accessor properties like Series.str
"""
def __init__(self, accessor_cls, construct_accessor):
self.accessor_cls = accessor_cls
self.construct_accessor = construct_accessor
self.__doc__ = accessor_cls.__doc__
def __get__(self, instance, owner=None):
if instance is None:
# this ensures that Series.str.<method> is well defined
return self.accessor_cls
return self.construct_accessor(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
class FrozenList(PandasObject, list):
"""
    Container that doesn't allow setting items *but*,
    because it's technically hashable, will be used
    for lookups, appropriately, etc.
"""
# Sidenote: This has to be of type list, otherwise it messes up PyTables
# typechecks
def __add__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(super(FrozenList, self).__add__(other))
__iadd__ = __add__
# Python 2 compat
def __getslice__(self, i, j):
return self.__class__(super(FrozenList, self).__getslice__(i, j))
def __getitem__(self, n):
# Python 3 compat
if isinstance(n, slice):
return self.__class__(super(FrozenList, self).__getitem__(n))
return super(FrozenList, self).__getitem__(n)
def __radd__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(other + list(self))
def __eq__(self, other):
if isinstance(other, (tuple, FrozenList)):
other = list(other)
return super(FrozenList, self).__eq__(other)
__req__ = __eq__
def __mul__(self, other):
return self.__class__(super(FrozenList, self).__mul__(other))
__imul__ = __mul__
def __reduce__(self):
return self.__class__, (list(self),)
def __hash__(self):
return hash(tuple(self))
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__.__name__)
def __unicode__(self):
from pandas.core.common import pprint_thing
return pprint_thing(self, quote_strings=True,
escape_chars=('\t', '\r', '\n'))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,
str(self))
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
pop = append = extend = remove = sort = insert = _disabled
class FrozenNDArray(PandasObject, np.ndarray):
# no __array_finalize__ for now because no metadata
def __new__(cls, data, dtype=None, copy=False):
if copy is None:
copy = not isinstance(data, FrozenNDArray)
res = np.array(data, dtype=dtype, copy=copy).view(cls)
return res
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__)
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
put = itemset = fill = _disabled
def _shallow_copy(self):
return self.view()
def values(self):
"""returns *copy* of underlying array"""
arr = self.view(np.ndarray).copy()
return arr
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
prepr = com.pprint_thing(self, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
class IndexOpsMixin(object):
""" common ops mixin to support a unified inteface / docs for Series / Index """
# ndarray compatibility
__array_priority__ = 1000
def transpose(self):
""" return the transpose, which is by definition self """
return self
T = property(transpose, doc="return the transpose, which is by definition self")
@property
def shape(self):
""" return a tuple of the shape of the underlying data """
return self.values.shape
@property
def ndim(self):
""" return the number of dimensions of the underlying data, by definition 1 """
return 1
def item(self):
""" return the first element of the underlying data as a python scalar """
try:
return self.values.item()
except IndexError:
# copy numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
""" return the data pointer of the underlying data """
return self.values.data
@property
def itemsize(self):
""" return the size of the dtype of the item of the underlying data """
return self.values.itemsize
@property
def nbytes(self):
""" return the number of bytes in the underlying data """
return self.values.nbytes
@property
def strides(self):
""" return the strides of the underlying data """
return self.values.strides
@property
def size(self):
""" return the number of elements in the underlying data """
return self.values.size
@property
def flags(self):
""" return the ndarray.flags for the underlying data """
return self.values.flags
@property
def base(self):
""" return the base object if the memory of the underlying data is shared """
return self.values.base
@property
def _values(self):
""" the internal implementation """
return self.values
def max(self):
""" The maximum value of the object """
return nanops.nanmax(self.values)
def argmax(self, axis=None):
"""
return a ndarray of the maximum argument indexer
See also
--------
numpy.ndarray.argmax
"""
return nanops.nanargmax(self.values)
def min(self):
""" The minimum value of the object """
return nanops.nanmin(self.values)
def argmin(self, axis=None):
"""
return a ndarray of the minimum argument indexer
See also
--------
numpy.ndarray.argmin
"""
return nanops.nanargmin(self.values)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return com.isnull(self).any()
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = getattr(self,name,None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".format(klass=self.__class__.__name__,op=name))
return func(**kwds)
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
"""
Returns object containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
"""
from pandas.core.algorithms import value_counts
from pandas.tseries.api import DatetimeIndex, PeriodIndex
result = value_counts(self, sort=sort, ascending=ascending,
normalize=normalize, bins=bins, dropna=dropna)
if isinstance(self, PeriodIndex):
# preserve freq
result.index = self._simple_new(result.index.values,
freq=self.freq)
elif isinstance(self, DatetimeIndex):
result.index = self._simple_new(result.index.values,
tz=getattr(self, 'tz', None))
return result
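    # Illustrative example (values chosen arbitrarily): value_counts returns a
    # Series of counts indexed by the unique values, most frequent first.
    #
    #     s = pd.Series([1, 1, 2, np.nan])
    #     s.value_counts()                  # 1.0 -> 2, 2.0 -> 1 (NaN excluded)
    #     s.value_counts(dropna=False)      # also counts the NaN
    #     s.value_counts(normalize=True)    # relative frequencies instead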
def unique(self):
"""
Return array of unique values in the object. Significantly faster than
numpy.unique. Includes NA values.
Returns
-------
uniques : ndarray
"""
from pandas.core.nanops import unique1d
values = self.values
if hasattr(values,'unique'):
return values.unique()
return unique1d(values)
def nunique(self, dropna=True):
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : boolean, default True
Don't include NaN in the count.
Returns
-------
nunique : int
"""
uniqs = self.unique()
n = len(uniqs)
if dropna and com.isnull(uniqs).any():
n -= 1
return n
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
if hasattr(self.values,'memory_usage'):
return self.values.memory_usage(deep=deep)
v = self.values.nbytes
if deep and com.is_object_dtype(self):
v += lib.memory_usage_of_objects(self.values)
return v
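    # Rough sketch of the deep=False vs deep=True distinction (numbers are
    # platform dependent and shown only to illustrate the idea): for object
    # dtype, the shallow value counts the array of pointers, while deep=True
    # also interrogates the Python objects they point to.
    #
    #     s = pd.Series(['a', 'bb', 'ccc'])
    #     s.memory_usage()            # pointer-based estimate
    #     s.memory_usage(deep=True)   # larger, includes the string payloads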
def factorize(self, sort=False, na_sentinel=-1):
"""
Encode the object as an enumerated type or categorical variable
Parameters
----------
sort : boolean, default False
Sort by values
na_sentinel: int, default -1
Value to mark "not found"
Returns
-------
labels : the indexer to the original array
uniques : the unique Index
"""
from pandas.core.algorithms import factorize
return factorize(self, sort=sort, na_sentinel=na_sentinel)
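    # Illustrative example: factorize maps each value to an integer label and
    # returns the unique values alongside those labels.
    #
    #     labels, uniques = pd.Index(['b', 'a', 'b', 'c']).factorize()
    #     # labels  -> array([0, 1, 0, 2])
    #     # uniques -> Index(['b', 'a', 'c'], dtype='object')
    #     # with sort=True, 'a' would be assigned label 0 instead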
def searchsorted(self, key, side='left'):
""" np.ndarray searchsorted compat """
### FIXME in GH7447
        #### needs coercion on the key (DatetimeIndex does already)
#### needs tests/doc-string
return self.values.searchsorted(key, side=side)
_shared_docs['drop_duplicates'] = (
"""Return %(klass)s with duplicate values removed
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
take_last : deprecated
%(inplace)s
Returns
-------
deduplicated : %(klass)s
""")
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
@Appender(_shared_docs['drop_duplicates'] % _indexops_doc_kwargs)
def drop_duplicates(self, keep='first', inplace=False):
duplicated = self.duplicated(keep=keep)
result = self[np.logical_not(duplicated)]
if inplace:
return self._update_inplace(result)
else:
return result
_shared_docs['duplicated'] = (
"""Return boolean %(duplicated)s denoting duplicate values
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
take_last : deprecated
Returns
-------
duplicated : %(duplicated)s
""")
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
@Appender(_shared_docs['duplicated'] % _indexops_doc_kwargs)
def duplicated(self, keep='first'):
keys = com._values_from_object(com._ensure_object(self.values))
duplicated = lib.duplicated(keys, keep=keep)
try:
return self._constructor(duplicated,
index=self.index).__finalize__(self)
except AttributeError:
return np.array(duplicated, dtype=bool)
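    # Illustrative example of duplicated/drop_duplicates and the keep options:
    #
    #     s = pd.Series([1, 2, 2, 3])
    #     s.duplicated()                   # [False, False, True, False]
    #     s.duplicated(keep='last')        # [False, True, False, False]
    #     s.drop_duplicates()              # 1, 2, 3 (first occurrences kept)
    #     s.drop_duplicates(keep=False)    # 1, 3 (all duplicates dropped)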
#----------------------------------------------------------------------
# abstracts
def _update_inplace(self, result, **kwargs):
raise AbstractMethodError(self)
| apache-2.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/table.py | 11 | 17551 | """
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <[email protected]>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
from . import artist
from .artist import Artist, allow_rasterization
from .patches import Rectangle
from .cbook import is_string_like
from matplotlib import docstring
from .text import Text
from .transforms import Bbox
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor)
self.set_clip_on(False)
# Create text object
if loc is None:
loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
        'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
        Currently supports 'left', 'center' and 'right'
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l, b, w, h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
    Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best': 0,
'upper right': 1, # default
'upper left': 2,
'lower left': 3,
'lower right': 4,
'center left': 5,
'center right': 6,
'lower center': 7,
'upper center': 8,
'center': 9,
'top right': 10,
'top left': 11,
'bottom left': 12,
'bottom right': 13,
'right': 14,
'left': 15,
'top': 16,
'bottom': 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None, **kwargs):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on '
'bottom; valid locations are\n%s\t' %
(loc, '\n\t'.join(six.iterkeys(self.codes))))
loc = 'bottom'
if is_string_like(loc):
loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self.update(kwargs)
self.set_clip_on(False)
self._cachedRenderer = None
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0, 0)
cell = Cell(xy, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
def _approx_text_height(self):
return (self.FONTSIZE / 72.0 * self.figure.dpi /
self._axes.bbox.height * 1.2)
@allow_rasterization
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one
# will do
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
self._cachedRenderer = renderer
if not self.get_visible():
return
renderer.open_group('table')
self._update_positions(renderer)
keys = list(six.iterkeys(self._cells))
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
#for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in six.iterkeys(self._cells)
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
if self._cachedRenderer is not None:
boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
for pos in six.iterkeys(self._cells)
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.contains(mouseevent.x, mouseevent.y), {}
else:
return False, {}
def get_children(self):
'Return the Artists contained by the table'
return list(six.itervalues(self._cells))
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [cell.get_window_extent(renderer)
for cell in six.itervalues(self._cells)]
return Bbox.union(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in six.iteritems(self._cells):
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = list(six.iterkeys(widths))
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = list(six.iterkeys(heights))
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in six.iteritems(self._cells):
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = list(six.itervalues(self._cells))[0].get_fontsize()
cells = []
for key, cell in six.iteritems(self._cells):
# ignore auto-sized columns
if key[1] in self._autoColumns:
continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in six.itervalues(self._cells):
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in six.itervalues(self._cells):
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in six.itervalues(self._cells):
cell.set_fontsize(size)
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in six.itervalues(self._cells):
x, y = c.get_x(), c.get_y()
c.set_x(x + ox)
c.set_y(y + oy)
def _update_positions(self, renderer):
# called from renderer to allow more precise estimates of
# widths and heights with get_window_extent
# Do any auto width setting
for col in self._autoColumns:
self._auto_set_column_width(col, renderer)
if self._autoFontsize:
self._auto_set_font_size(renderer)
# Align all the cells
self._do_cell_alignment()
bbox = self._get_grid_bbox(renderer)
l, b, w, h = bbox.bounds
if self._bbox is not None:
# Position according to bbox
rl, rb, rw, rh = self._bbox
self.scale(rw / w, rh / h)
ox = rl - l
oy = rb - b
self._do_cell_alignment()
else:
# Position using loc
(BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
TR, TL, BL, BR, R, L, T, B) = list(xrange(len(self.codes)))
# defaults for center
ox = (0.5 - w / 2) - l
oy = (0.5 - h / 2) - b
if self._loc in (UL, LL, CL): # left
ox = self.AXESPAD - l
if self._loc in (BEST, UR, LR, R, CR): # right
ox = 1 - (l + w + self.AXESPAD)
if self._loc in (BEST, UR, UL, UC): # upper
oy = 1 - (b + h + self.AXESPAD)
if self._loc in (LL, LR, LC): # lower
oy = self.AXESPAD - b
if self._loc in (LC, UC, C): # center x
ox = (0.5 - w / 2) - l
if self._loc in (CL, CR, C): # center y
oy = (0.5 - h / 2) - b
if self._loc in (TL, BL, L): # out left
ox = - (l + w)
if self._loc in (TR, BR, R): # out right
ox = 1.0 - l
if self._loc in (TR, TL, T): # out top
oy = 1.0 - b
if self._loc in (BL, BR, B): # out bottom
oy = - (b + h)
self._offset(ox, oy)
def get_celld(self):
'return a dict of cells in the table'
return self._cells
def table(ax,
cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None,
**kwargs):
"""
TABLE(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None)
Factory function to generate a Table instance.
Thanks to John Gill for providing the class and table.
"""
# Check we have some cellText
if cellText is None:
# assume just colours are needed
rows = len(cellColours)
cols = len(cellColours[0])
        cellText = [[''] * cols] * rows
rows = len(cellText)
cols = len(cellText[0])
for row in cellText:
assert len(row) == cols
if cellColours is not None:
assert len(cellColours) == rows
for row in cellColours:
assert len(row) == cols
else:
cellColours = ['w' * cols] * rows
# Set colwidths if not given
if colWidths is None:
colWidths = [1.0 / cols] * cols
# Fill in missing information for column
# and row labels
rowLabelWidth = 0
if rowLabels is None:
if rowColours is not None:
rowLabels = [''] * rows
rowLabelWidth = colWidths[0]
elif rowColours is None:
rowColours = 'w' * rows
if rowLabels is not None:
assert len(rowLabels) == rows
# If we have column labels, need to shift
# the text and colour arrays down 1 row
offset = 1
if colLabels is None:
if colColours is not None:
colLabels = [''] * cols
else:
offset = 0
elif colColours is None:
colColours = 'w' * cols
if rowLabels is not None:
assert len(rowLabels) == rows
# Set up cell colours if not given
if cellColours is None:
cellColours = ['w' * cols] * rows
# Now create the table
table = Table(ax, loc, bbox, **kwargs)
height = table._approx_text_height()
# Add the cells
for row in xrange(rows):
for col in xrange(cols):
table.add_cell(row + offset, col,
width=colWidths[col], height=height,
text=cellText[row][col],
facecolor=cellColours[row][col],
loc=cellLoc)
# Do column labels
if colLabels is not None:
for col in xrange(cols):
table.add_cell(0, col,
width=colWidths[col], height=height,
text=colLabels[col], facecolor=colColours[col],
loc=colLoc)
# Do row labels
if rowLabels is not None:
for row in xrange(rows):
table.add_cell(row + offset, -1,
width=rowLabelWidth or 1e-15, height=height,
text=rowLabels[row], facecolor=rowColours[row],
loc=rowLoc)
if rowLabelWidth == 0:
table.auto_set_column_width(-1)
ax.add_table(table)
return table
docstring.interpd.update(Table=artist.kwdoc(Table))
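# A minimal usage sketch of the table() factory above; the figure/axes setup is
# illustrative and not part of this module.
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.axis('off')
#     tab = table(ax,
#                 cellText=[['1', '2'], ['3', '4']],
#                 rowLabels=['row1', 'row2'],
#                 colLabels=['col1', 'col2'],
#                 loc='center')
#     plt.show()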
| mit |
mrshu/scikit-learn | sklearn/utils/tests/test_utils.py | 10 | 3595 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from nose.tools import assert_equal, assert_raises, assert_true
from numpy.testing import assert_almost_equal
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils.extmath import pinvh
def test_make_rng():
"""Check the check_random_state utility function behavior"""
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
"""Border case not worth mentioning in doctests"""
assert_true(resample() is None)
def test_deprecated():
"""Test whether the deprecated decorator issues appropriate warnings"""
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
| bsd-3-clause |
Barmaley-exe/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
ashhher3/scikit-learn | sklearn/manifold/tests/test_isomap.py | 28 | 4007 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
gilliM/quality | apexquality/apex_quality.py | 1 | 19580 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
ApexQuality
A QGIS plugin
Automatic quality assessent of APEX data
-------------------
begin : 2016-03-11
git sha : $Format:%H$
copyright : (C) 2016 by RSL
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QUrl, QFileInfo, Qt
from PyQt4.QtGui import QAction, QIcon, QToolButton, QMenu, QDesktopServices, QKeySequence, QColor
# Initialize Qt resources from file resources.py
import resources # @UnusedImport
import os.path
from matplotlib import pyplot as plt
from sklearn.decomposition import TruncatedSVD
from sklearn import mixture
import numpy as np
from qgis import utils as qgis_utils
from qgis import core as qgis_core
from qgis import gui as qgis_gui
from osgeo import gdal
from matplotlib.backends.backend_pdf import PdfPages
from pandas.tools.plotting import table
from pandas import DataFrame
import spectral
from spectral_utils import getSubset
from pyplot_widget import pyPlotWidget
import customization
from kmeans_widget import KMeanWidget
from qgis_spectral_tool import SpectralTool
class ApexQuality:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor."""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'ApexQuality_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Apex Quality Assessment')
self.spectralTool = SpectralTool(self.iface.mapCanvas())
self.path = os.path.dirname(os.path.realpath(__file__))
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('ApexQuality', message)
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/ApexQuality/icon.png'
self.action1 = QAction(QIcon(icon_path), u"Unsuperviseds classification", self.iface.mainWindow())
self.action2 = QAction(QIcon(icon_path), u"Spectral tool", self.iface.mainWindow())
self.action3 = QAction(QIcon(icon_path), u"Help", self.iface.mainWindow())
self.action1.setShortcut(QKeySequence(Qt.SHIFT + Qt.CTRL + Qt.Key_Y))
self.actions.append(self.action1)
self.actions.append(self.action2)
self.actions.append(self.action3)
self.popupMenu = QMenu(self.iface.mainWindow())
self.popupMenu.addAction(self.action1)
self.popupMenu.addAction(self.action2)
self.popupMenu.addSeparator()
self.popupMenu.addAction(self.action3)
self.action1.triggered.connect(self.someMethod1)
self.action2.triggered.connect(self.someMethod2)
self.action3.triggered.connect(self.someMethod3)
self.toolButton = QToolButton()
self.toolButton.setMenu(self.popupMenu)
self.toolButton.setDefaultAction(self.action1)
self.toolButton.setPopupMode(QToolButton.InstantPopup)
self.toolbar1 = self.iface.addToolBarWidget(self.toolButton)
def someMethod1(self):
dialog = KMeanWidget()
ok = dialog.exec_()
if not ok:
return
filePath = self.getCurrentImage()
if filePath is None:
qgis_utils.iface.messageBar().pushMessage("Error",
"No Raster selected", level = qgis_gui.QgsMessageBar.CRITICAL, duration = 5)
return
n_class = dialog.classSpinBox.value()
cmap = plt.get_cmap('gist_rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, n_class)]
if dialog.restrictedRB.isChecked():
data, xMin, yMax = getSubset(filePath)
if data is None:
qgis_utils.iface.messageBar().pushMessage("Error",
"Problem of extend", level = qgis_gui.QgsMessageBar.CRITICAL, duration = 5)
return
g = mixture.GMM(n_components = n_class)
h, w, n_b = data.shape
data = data.reshape(-1, n_b)
pca = TruncatedSVD(n_components = dialog.nCompSvdSpinBox.value())
data_cp = pca.fit_transform(data)
m = g.fit_predict(data_cp)
indicator = np.max(g.predict_proba(data_cp), axis = 1)
indicator = np.reshape(indicator, (h, w))
m = np.reshape(m, (h, w))
c = pca.inverse_transform(g.means_)
c_covar = pca.inverse_transform(g.covars_)
else:
img = spectral.open_image(filePath.replace('.bsq', '.hdr'))
data = img.load()
xMin = 0; yMax = 0
g = mixture.GMM(n_components = n_class)
h, w, n_b = data.shape
data = data.reshape(-1, n_b)
subset = data[np.random.choice(data.shape[0], 100000)]
pca = TruncatedSVD(n_components = dialog.nCompSvdSpinBox.value())
pca.fit(subset)
subset_pc = pca.transform(subset)
data_pc = pca.transform(data)
g.fit(subset_pc)
m = g.predict(data_pc)
indicator = np.max(g.predict_proba(data_pc), axis = 1)
indicator = np.reshape(indicator, (h, w))
m = np.reshape(m, (h, w))
c = pca.inverse_transform(g.means_)
c_covar = pca.inverse_transform(g.covars_)
if dialog.pyplotCB.isChecked():
self.c_plot = pyPlotWidget()
ax = self.c_plot.figure.add_subplot(431)
ax.hold(1)
for i in range(c.shape[0]):
ax.plot(g.means_[i], color = colors[i])
ax = self.c_plot.figure.add_subplot(432)
for i in range(c.shape[0]):
ax.plot(g.covars_[i], color = colors[i])
ax = self.c_plot.figure.add_subplot(434)
for i in range(c.shape[0]):
ax.plot(c[i], color = colors[i])
ax.set_ylim([0, 1])
ax = self.c_plot.figure.add_subplot(435)
for i in range(c.shape[0]):
ax.plot((c_covar[i]), color = colors[i])
ax.hold(0)
uniqu = np.unique(m)
ax = self.c_plot.figure.add_subplot(437)
ax.imshow(m, cmap = cmap , vmin = np.min(uniqu), vmax = np.max(uniqu))
ax = self.c_plot.figure.add_subplot(438)
imbar = ax.imshow(indicator, cmap = plt.cm.hot) # @UndefinedVariable
self.c_plot.figure.colorbar(imbar)
ax = self.c_plot.figure.add_subplot(4, 3, 10)
for i in range(c.shape[0]):
ax.plot([0], [0], color = colors[i], label = i)
ax.legend()
ax.axis('off')
ax = self.c_plot.figure.add_subplot(4, 3, 11)
for i in range(c.shape[0]):
ax.plot((c_covar[i] / c[i]), color = colors[i])
ax.hold(0)
self.c_plot.canvas.draw();
self.c_plot.show();
self.c_plot.raise_()
class_list = []
for class_i in range(np.min(uniqu), np.max(uniqu) + 1):
class_list.append(np.reshape(m == class_i, (-1)))
colors = [cmap(i) for i in np.linspace(0, 1, np.max(uniqu) + 1 - np.min(uniqu))]
ax = self.c_plot.figure.add_subplot(1, 3, 3)
for j, class_i in enumerate(range(n_class)):
# ax = self.c_plot.figure.add_subplot(np.max(uniqu) + 1 - np.min(uniqu), 3, 3 * (j + 1))
bool_i = class_list[j]
data_class = data[bool_i, :]
# ax.axis('off')
results, headers = customization.compute_stats_per_class(data_class)
color = colors[j]
ax.plot(j + results[1], '-', color = color)
ax.plot(j + results[1] + results[3], '-', color = color)
ax.plot(j + results[1] - results[3], '-', color = color)
ax.plot(j + results[0], '-.', color = color)
ax.plot(j + results[2], '--', color = color)
ax.set_ylim([0, j + 1])
ax.set_axis_off()
ax.set_frame_on(True)
ax.set_axis_bgcolor('w')
# ax.plot([0], [0], 'k-', label = 'mean')
# ax.plot([0], [0], 'k--', label = 'max')
# ax.plot([0], [0], 'k-.', label = 'min')
# ax.legend()
self.c_plot.figure.subplots_adjust(left = 0.02, right = 0.98, top = 0.98, bottom = 0.1, wspace = 0.05, hspace = 0.05)
self.c_plot.canvas.draw();
self.c_plot.showMaximized();
self.c_plot.exec_()
if dialog.geotiffCB.isChecked():
dataset1 = gdal.Open(filePath)
geoTransform = list(dataset1.GetGeoTransform())
geoTransform[0] += (xMin * geoTransform[1])
if geoTransform[5] > 0: geoTransform[5] *= -1
geoTransform[3] += (yMax * geoTransform[5])
r_save = np.array(m, dtype = np.uint8)
r_save = np.reshape(r_save, (r_save.shape[0], r_save.shape[1], 1))
self.WriteGeotiffNBand(r_save, self.path + '/temp/temp.tiff', gdal.GDT_Byte, geoTransform, dataset1.GetProjection())
fileInfo = QFileInfo(self.path + '/temp/test.tiff')
baseName = fileInfo.baseName()
rlayer = qgis_core.QgsRasterLayer(self.path + '/temp/temp.tiff', baseName)
fcn = qgis_core.QgsColorRampShader()
fcn.setColorRampType(qgis_core.QgsColorRampShader.EXACT)
lst = [ qgis_core.QgsColorRampShader.ColorRampItem(j, QColor(colors[j][0] * 255, colors[j][1] * 255, colors[j][2] * 255)) for j in range(n_class) ]
fcn.setColorRampItemList(lst)
shader = qgis_core.QgsRasterShader()
shader.setRasterShaderFunction(fcn)
renderer = qgis_core.QgsSingleBandPseudoColorRenderer(rlayer.dataProvider(), 1, shader)
rlayer.setRenderer(renderer)
qgis_core.QgsMapLayerRegistry.instance().addMapLayer(rlayer)
r_save = np.array(indicator, dtype = np.float32)
r_save = np.reshape(r_save, (r_save.shape[0], r_save.shape[1], 1))
self.WriteGeotiffNBand(r_save, self.path + '/temp/temp_indicator.tiff', gdal.GDT_Float32, geoTransform, dataset1.GetProjection())
fileInfo = QFileInfo(self.path + '/temp/test.tiff')
baseName = fileInfo.baseName()
rlayer = qgis_core.QgsRasterLayer(self.path + '/temp/temp_indicator.tiff', baseName)
qgis_core.QgsMapLayerRegistry.instance().addMapLayer(rlayer)
if dialog.pdfCB.isChecked():
outputFile = self.path + '/temp/test.pdf'
with PdfPages(outputFile) as pdf:
c_plot = pyPlotWidget()
ax = c_plot.figure.add_subplot(221)
ax.set_title('SVD Classes')
ax.hold(1)
for i in range(c.shape[0]):
ax.plot(g.means_[i], color = colors[i]) # @UndefinedVariable)
ax = c_plot.figure.add_subplot(222)
ax.set_title('SVD variances')
for i in range(c.shape[0]):
ax.plot(g.covars_[i], color = colors[i]) # @UndefinedVariable
ax = c_plot.figure.add_subplot(223)
ax.set_title('Spectrum Classes')
for i in range(c.shape[0]):
ax.plot(c[i], color = colors[i]) # @UndefinedVariable
ax.set_ylim([0, 1])
ax = c_plot.figure.add_subplot(224)
ax.set_title('Spectrum Variances')
for i in range(c.shape[0]):
ax.plot(c_covar[i], color = colors[i]) # @UndefinedVariable
ax.hold(0)
uniqu = np.unique(m)
pdf.savefig(c_plot.figure)
c_plot = pyPlotWidget()
ax = c_plot.figure.add_subplot(221)
ax.set_title('Classification')
ax.imshow(m, cmap = plt.cm.gist_rainbow , vmin = np.min(uniqu), vmax = np.max(uniqu)) # @UndefinedVariable
ax = c_plot.figure.add_subplot(222)
ax.set_title('Minimal distance to class')
imbar = ax.imshow(indicator, cmap = plt.cm.hot) # @UndefinedVariable
c_plot.figure.colorbar(imbar)
ax = c_plot.figure.add_subplot(223)
ax.set_title('Classification')
original = np.reshape(data, (h, w, n_b))
img = np.transpose(np.array((self.getBand(original, 39), self.getBand(original, 17), self.getBand(original, 6))), (1, 2, 0))
ax.imshow(img, interpolation = "nearest")
ax = c_plot.figure.add_subplot(224)
for i in range(c.shape[0]):
ax.plot([0], [0], color = colors[i], label = i) # @UndefinedVariable
ax.legend(prop = {'size':int(96.0 / n_class)})
ax.axis('off')
c_plot.canvas.draw()
pdf.savefig(c_plot.figure)
class_list = []
for class_i in range(np.min(uniqu), np.max(uniqu) + 1):
class_list.append(np.reshape(m == class_i, (-1)))
nn = 5
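# paginate the per-class summary plots: at most nn classes are drawn per PDF page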
n_pages = int(np.ceil(n_class / float(nn)))
r = range(np.min(uniqu), np.max(uniqu) + 1)
for p in range(n_pages):
c_plot = pyPlotWidget()
ax = c_plot.figure.add_subplot(1, 1, 1)
for j, class_i in enumerate(r[(p * nn): np.min([((p + 1) * nn), len(r)])]):
# ax = self.c_plot.figure.add_subplot(np.max(uniqu) + 1 - np.min(uniqu), 3, 3 * (j + 1))
bool_i = class_list[j + p * nn]
data_class = data[bool_i, :]
# ax.axis('off')
results, headers = customization.compute_stats_per_class(data_class)
color = colors[j + p * nn]
ax.plot(nn - 1 - j + results[1], '-', color = color)
ax.plot(nn - 1 - j + results[1] + results[3], ':', color = color)
ax.plot(nn - 1 - j + results[1] - results[3], ':', color = color)
ax.plot(nn - 1 - j + results[0], '-.', color = color)
ax.plot(nn - 1 - j + results[2], '--', color = color)
ax.set_ylim([0, nn])
ax.set_axis_off()
ax.set_frame_on(True)
ax.set_axis_bgcolor('w')
pdf.savefig(c_plot.figure)
"""
### The following part was used to write tables to the pdf
nn = 12
t_n = int(np.ceil(n_b / float(nn)))
for i in range(nn):
c_plot = pyPlotWidget()
sub = data[:, (i * t_n):np.min((((i + 1) * t_n), n_b))]
for j, class_i in enumerate(range(np.min(uniqu), np.max(uniqu) + 1)):
print i, j
ax = c_plot.figure.add_subplot(1, np.max(uniqu) + 1, class_i + 1)
bool_i = class_list[j]
data_class = sub[bool_i, :]
ax.set_title('Class %s' % class_i)
ax.axis('off')
results, headers = customization.compute_stats_per_class(data_class)
matrix = np.transpose(np.array(results))
df = DataFrame(matrix, columns = headers, dtype = np.float32)
table(ax, df, rowLabels = range((i * t_n) + 1, np.min((((i + 1) * t_n), n_b)) + 1), loc = 'upper right', colWidths = [1.0 / matrix.shape[1]] * matrix.shape[1])
c_plot.canvas.draw()
pdf.savefig(c_plot.figure)
"""
url = QUrl('file://' + outputFile)
QDesktopServices.openUrl(url)
def getBand(self, array, i):
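# Contrast-stretch band i between its 2nd and 98th percentiles and rescale to 0-255 (uint8) for display.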
val = array[:, :, i]
p_hi = np.percentile(val, 98.0) # / 1.5
p_lo = np.percentile(val, 2.0) # * 1.5
val = (val - p_lo) / (p_hi - p_lo)
val[val < 0] = 0
val[val > 1] = 1
val = np.array(np.round(val * 255), dtype = np.uint8)
return val
def someMethod2(self):
self.iface.mapCanvas().setMapTool(self.spectralTool)
self.spectralTool.plot = pyPlotWidget()
def someMethod3(self):
path = os.path.dirname(os.path.realpath(__file__))
url = QUrl('file://' + path + '/help/build/html/index.html')
QDesktopServices.openUrl(url)
def getCurrentImage(self):
rlayer = qgis_utils.iface.mapCanvas().currentLayer()
if rlayer is None:
return
else:
return rlayer.source()
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
self.iface.removeToolBarIcon(self.toolbar1)
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&Apex Quality Assessment'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
self.spectralTool.deactivate()
def WriteGeotiffNBand(self, raster, filepath, dtype, vectReference, proj):
nrows, ncols, n_b = np.shape(raster)
driver = gdal.GetDriverByName("GTiff")
dst_ds = driver.Create(filepath, ncols, nrows, n_b, dtype, ['COMPRESS=LZW'])
dst_ds.SetProjection(proj)
dst_ds.SetGeoTransform(vectReference)
for i in range(n_b):
R = np.array(raster[:, :, i], dtype = np.float32)
dst_ds.GetRasterBand(i + 1).WriteArray(R) # Red
dst_ds.GetRasterBand(i + 1).SetNoDataValue(-1)
dst_ds = None
| gpl-3.0 |
Kate-Willett/HadISDH_Marine_Build | EUSTACE_SST_MAT/PlotMetaData_APR2016.py | 1 | 47963 | #!/usr/local/sci/bin/python
# PYTHON2.7
#
# Author: Kate Willett
# Created: 1 April 2016
# Last update: 1 April 2016
# Location: /data/local/hadkw/HADCRUH2/MARINE/EUSTACEMDS/EUSTACE_SST_MAT/
# GitHub: https://github.com/Kate-Willett/HadISDH_Marine_Build/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code reads in the ICOADS data output from QC using MDS_basic_KATE and
# pulls out the height and instrument metadata to make diagnostic plots.
#
# Obs height as provided by HOT, HOB or possibly inferred from LOV, HOP or HOA.
# HOT and or HOB are not often present.
# Can we infer HOT/HOB from HOA or HOP of LOV?
# Generally, HOA is higher than HOP - not a clear relationship.
# Generally, HOA is ~12m higher than HOT or HOB but this needs to be tested across more months - does this change over time/latitude etc?
# Generally, LOV is ~10*HOT/HOB
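# As a purely illustrative sketch (assumed rules of thumb, not applied anywhere in this code), those relationships would translate to:
# est_height_from_HOA = HOA - 12. # anemometer assumed ~12m above the thermometer/barometer
# est_height_from_LOV = LOV / 10. # vessel length assumed ~10x the instrument height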
# I'm now writing some code to read in groups of months, pull out LOV,HOA,HOP,HOT,HOB,PT - and also the type/exposure info TOT, EOT, TOH, EOH
# - plots, EOT/EOH by latitude where 1 = none, 2 = aspirated (A), 3 = whirled (SG/SL/W), 4 = screened (S/SN/VS), 5 = unscreened (US)
# - prints, number and % of obs with TOT, EOT, TOH and EOH present
# - plots, HOB, HOT, HOA, HOP, LOV (second axis?) by latitude
# - prints, number and % of obs with HOB, HOT, HOA, HOP and LOV
# - plots, HOB, HOT, HOA, HOP, LOV histogram
# - prints, mean and standard deviation
# - plots, HOP vs HOA, HOA vs HOT, HOA vs HOB, HOP vs HOB, HOP vs HOT with lines of best fit
# - prints, number and % where HOP and HOA present, HOA and HOT present, HOA and HOB present, HOP and HOB present, HOP and HOT present, print equation for fit
# - plots, HOA - HOP, HOA - HOT, HOA - HOB, HOP - HOB and HOP - HOT with
# - prints, mean and standard deviation of difference series
# - plots, LOV vs HOT, LOV vs HOB with lines of best fit
# - prints, number and % where LOV and HOB present, where LOV and HOT present and equations for fit
# - plots, LOV / HOT, LOV / HOB
# - prints, mean and standard deviation of ratios
#
# This program creates three figures (pngs - eps output is available if the commented lines are uncommented) and three text files that are appended to with each run
# Ideally you would run it once for each year
#
# -----------------------
# LIST OF MODULES
# -----------------------
# inbuilt:
# import datetime as dt
# import matplotlib.pyplot as plt
# import numpy as np
# from matplotlib.dates import date2num,num2date
# import sys, os
# import sys, getopt
# from scipy.optimize import curve_fit,fsolve,leastsq
# from scipy import pi,sqrt,exp
# from scipy.special import erf
# import scipy.stats
# from math import sqrt,pi
# import struct
# import pdb # pdb.set_trace() or c
#
# Kates:
# from LinearTrends import MedianPairwise - fits linear trend using Median Pairwise
# import MDS_RWtools as MDStool
#
# -----------------------
# DATA
# -----------------------
# /project/hadobs2/hadisdh/marine/ICOADS.2.5.1/ERAclimNBC/new_suite_197312_ERAclimNBC.txt
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# set up date cluster choices
# year1, year2, month1, month2
#
# python2.7 PlotMetaData_APR2016.py --year1 2000 --year2 2000 --month1 01 --month2 12 --typee ERAclimNBC (optionally --switch all/ships/ships0-5/buoys/platforms)
#
# This runs the code, outputs the plots and stops mid-process so you can then interact with the
# data.
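# To cover a longer period you would simply repeat the call once per year, e.g. (illustrative years only):
# python2.7 PlotMetaData_APR2016.py --year1 1973 --year2 1973 --month1 01 --month2 12 --typee ERAclimNBC
# python2.7 PlotMetaData_APR2016.py --year1 1974 --year2 1974 --month1 01 --month2 12 --typee ERAclimNBC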
#
# -----------------------
# OUTPUT
# -----------------------
# some plots:
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/InstrumentMetaDataDiags_all_ERAclimNBC_y1y2m1m2_APR2016.png
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/HeightMetaDataDiags_all_ERAclimNBC_y1y2m1m2_APR2016.png
#
# a text file of stats
# /data/local/hadkw/HADCRUH2/MARINE/LISTS/InstrumentMetaDataStats_all_ERAclimNBC_APR2016.txt
# /data/local/hadkw/HADCRUH2/MARINE/LISTS/HeightMetaDataStats_all_ERAclimNBC_APR2016.txt
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 2 (13 October 2016)
# ---------
#
# Enhancements
#
# Changes
# Instrument Exposure
# This now has A (aspirated) on its own and merges VS (ventilated screen) with S (screen) and SN (ships screen)
#
# Bug fixes
# The missing %) has been fixed
#
# Version 1 (1 April 2016)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
# So there really isn't much information for buoys or platforms (or many ships other than PT=5)
# A tiny number of buoys say they have height info - height = 74m - UNLIKELY!!!
# No platforms have height info
# Some buoys have exposure info - all info says US (unscreened) - therefore shouldn't need a bias correction?
# No platforms have exposure info
#
#************************************************************************
# START
#************************************************************************
#import datetime as dt
import matplotlib
# use the Agg environment to generate an image rather than outputting to screen
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
#from matplotlib.dates import date2num,num2date
import sys, os
import sys, getopt
#from scipy.optimize import curve_fit,fsolve,leastsq
#from scipy import pi,sqrt,exp
#from scipy.special import erf
#import scipy.stats
#from math import sqrt,pi
#import struct
import pdb # pdb.set_trace() or c
#from LinearTrends import MedianPairwise
import MDS_RWtools as MDStool
# changeable variables
# Which month/year is this being run?
nowmon = 'OCT'
nowyear = '2016'
# Which ICOADS source are you using - check MDS_RWtools.py!!!
source = 'I300'
#************************************************************************
# Main
#************************************************************************
def main(argv):
# INPUT PARAMETERS AS STRINGS!!!!
year1 = '2000'
year2 = '2000'
month1 = '01' # months must be 01, 02 etc
month2 = '12'
typee = 'ERAclimNBC'
# MANUAL SWITCH FOR PLATFORM TYPE
# switch = 'all' # include all obs
# switch = 'ships' # include only ships with PT = 0, 1, 2, 3, 4, 5 - can be ships0, ships1, ships2, ships3, ships4, ships5
# switch = 'buoys' # include only those obs with PT = 6(moored), 8(ice) - can be buoys6, buoys8 (but very little point as no metadata!)
# switch = 'platforms' # include only those obs with PT = 9(ice), 10(oceanographic), 15 (fixed ocean) NO METADATA!!!
switch = 'all'
try:
opts, args = getopt.getopt(argv, "hi:",
["year1=","year2=","month1=","month2=","typee=","switch="])
except getopt.GetoptError:
print 'Usage (as strings) PlotMetaData_APR2016.py --year1 <1973> --year2 <1973> '+\
'--month1 <01> --month2 <12>'
sys.exit(2)
for opt, arg in opts:
if opt == "--year1":
try:
year1 = arg
except:
sys.exit("Failed: year1 not an integer")
elif opt == "--year2":
try:
year2 = arg
except:
sys.exit("Failed: year2 not an integer")
elif opt == "--month1":
try:
month1 = arg
except:
sys.exit("Failed: month1 not an integer")
elif opt == "--month2":
try:
month2 = arg
except:
sys.exit("Failed: month2 not an integer")
elif opt == "--typee":
try:
typee = arg
except:
sys.exit("Failed: typee not a string")
elif opt == "--switch":
try:
switch = arg
print(arg,switch)
except:
switch = 'all'
assert year1 != -999 and year2 != -999, "Year not specified."
print(year1, year2, month1, month2, typee, switch)
# pdb.set_trace()
#INDIR = '/project/hadobs2/hadisdh/marine/ICOADS.2.5.1/ERAclimNBC/'
#INFIL = 'new_suite_'
#INEXT = '_'+typee+'.txt'
# OUTDIR = '/data/local/hadkw/HADCRUH2/MARINE/'
OUTDIR = ''
OutTypeFil = 'IMAGES/InstrTypeMetaDataDiags_'+switch+'_'+typee+'_'+year1+year2+month1+month2+'_'+source+'_'+nowmon+nowyear
OutInstrFil = 'IMAGES/InstrumentMetaDataDiags_'+switch+'_'+typee+'_'+year1+year2+month1+month2+'_'+source+'_'+nowmon+nowyear
OutHeightFil = 'IMAGES/HeightMetaDataDiags_'+switch+'_'+typee+'_'+year1+year2+month1+month2+'_'+source+'_'+nowmon+nowyear
OutTypeText = 'LISTS/InstrTypeMetaDataStats_'+switch+'_'+typee+'_'+source+'_'+nowmon+nowyear+'.txt'
OutInstrumentText = 'LISTS/InstrumentMetaDataStats_'+switch+'_'+typee+'_'+source+'_'+nowmon+nowyear+'.txt'
OutHeightText = 'LISTS/HeightMetaDataStats_'+switch+'_'+typee+'_'+source+'_'+nowmon+nowyear+'.txt'
# create empty arrays for data bundles
nobs=0 # we're looking at all obs, not just those with 'good' data
LATbun = []
EOTbun = []
TOHbun = []
EOHbun = []
LOVbun = []
HOTbun = []
HOBbun = []
HOAbun = []
HOPbun = []
# loop through each month, read in data, keep metadata needed
for yy in range((int(year2)+1)-int(year1)):
for mm in range((int(month2)+1)-int(month1)):
print(str(yy+int(year1)),' ','{:02}'.format(mm+int(month1)))
MDSdict=MDStool.ReadMDSstandard(str(yy+int(year1)),'{:02}'.format(mm+int(month1)), typee)
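# MDSdict is assumed to be a dictionary of per-observation arrays; only LAT, PT and the metadata fields (EOT, EOH, TOH, LOV, HOT, HOB, HOA, HOP) are used below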
if (nobs == 0):
if (switch == 'all'):
LATbun = MDSdict['LAT']
EOTbun = MDSdict['EOT']
TOHbun = MDSdict['TOH']
EOHbun = MDSdict['EOH']
LOVbun = MDSdict['LOV']
HOTbun = MDSdict['HOT']
HOBbun = MDSdict['HOB']
HOAbun = MDSdict['HOA']
HOPbun = MDSdict['HOP']
else:
if (switch[0:5] == 'ships'):
if (switch == 'ships'):
pointers = np.where(MDSdict['PT'] <= 5)[0]
elif (switch == 'ships0'):
pointers = np.where(MDSdict['PT'] == 0)[0]
elif (switch == 'ships1'):
pointers = np.where(MDSdict['PT'] == 1)[0]
elif (switch == 'ships2'):
pointers = np.where(MDSdict['PT'] == 2)[0]
elif (switch == 'ships3'):
pointers = np.where(MDSdict['PT'] == 3)[0]
elif (switch == 'ships4'):
pointers = np.where(MDSdict['PT'] == 4)[0]
elif (switch == 'ships5'):
pointers = np.where(MDSdict['PT'] == 5)[0]
elif (switch == 'buoys'):
pointers = np.where((MDSdict['PT'] == 6) | (MDSdict['PT'] == 8))[0]
elif (switch == 'platforms'):
pointers = np.where(MDSdict['PT'] >= 9)[0] # ok because only 9, 10 or 15 should be present
LATbun = MDSdict['LAT'][pointers]
EOTbun = MDSdict['EOT'][pointers]
TOHbun = MDSdict['TOH'][pointers]
EOHbun = MDSdict['EOH'][pointers]
LOVbun = MDSdict['LOV'][pointers]
HOTbun = MDSdict['HOT'][pointers]
HOBbun = MDSdict['HOB'][pointers]
HOAbun = MDSdict['HOA'][pointers]
HOPbun = MDSdict['HOP'][pointers]
else:
if (switch == 'all'):
LATbun = np.append(LATbun,MDSdict['LAT'])
EOTbun = np.append(EOTbun,MDSdict['EOT'])
TOHbun = np.append(TOHbun,MDSdict['TOH'])
EOHbun = np.append(EOHbun,MDSdict['EOH'])
LOVbun = np.append(LOVbun,MDSdict['LOV'])
HOTbun = np.append(HOTbun,MDSdict['HOT'])
HOBbun = np.append(HOBbun,MDSdict['HOB'])
HOAbun = np.append(HOAbun,MDSdict['HOA'])
HOPbun = np.append(HOPbun,MDSdict['HOP'])
else:
if (switch[0:5] == 'ships'):
if (switch == 'ships'):
pointers = np.where(MDSdict['PT'] <= 5)[0]
elif (switch == 'ships0'):
pointers = np.where(MDSdict['PT'] == 0)[0]
elif (switch == 'ships1'):
pointers = np.where(MDSdict['PT'] == 1)[0]
elif (switch == 'ships2'):
pointers = np.where(MDSdict['PT'] == 2)[0]
elif (switch == 'ships3'):
pointers = np.where(MDSdict['PT'] == 3)[0]
elif (switch == 'ships4'):
pointers = np.where(MDSdict['PT'] == 4)[0]
elif (switch == 'ships5'):
pointers = np.where(MDSdict['PT'] == 5)[0]
elif (switch == 'buoys'):
pointers = np.where((MDSdict['PT'] == 6) | (MDSdict['PT'] == 8))[0]
elif (switch == 'platforms'):
pointers = np.where(MDSdict['PT'] >= 9)[0] # ok because only 9, 10 or 15 should be present
LATbun = np.append(LATbun,MDSdict['LAT'][pointers])
EOTbun = np.append(EOTbun,MDSdict['EOT'][pointers])
TOHbun = np.append(TOHbun,MDSdict['TOH'][pointers])
EOHbun = np.append(EOHbun,MDSdict['EOH'][pointers])
LOVbun = np.append(LOVbun,MDSdict['LOV'][pointers])
HOTbun = np.append(HOTbun,MDSdict['HOT'][pointers])
HOBbun = np.append(HOBbun,MDSdict['HOB'][pointers])
HOAbun = np.append(HOAbun,MDSdict['HOA'][pointers])
HOPbun = np.append(HOPbun,MDSdict['HOP'][pointers])
if (switch == 'all'):
nobs = nobs + len(MDSdict['LAT'])
else:
nobs = nobs + len(MDSdict['LAT'][pointers])
MDSdict = 0 # clear out
# set up general plotting stuff
# set up dimensions and plot - this is a 2 by 2 plot
# - plots, TOH by latitude where 1 = hygristor, 2 = chilled mirror, 3 = other, C = capacitance, E = electric, H = hair hygrometer, P = psychrometer, T = torsion
# - prints, number and % of obs with TOH present, and in the categories
plt.clf()
fig=plt.figure(figsize=(6,8))
ax=plt.axes([0.1,0.1,0.85,0.7])
plt.xlim(0,9)
plt.ylim(-91,91)
plt.xlabel('Instrument Type Category')
plt.ylabel('Latitude')
locs = ax.get_xticks().tolist()
labels=[x.get_text() for x in ax.get_xticklabels()]
labels[1] = '1'
labels[2] = '2'
labels[3] = '3'
labels[4] = 'C'
labels[5] = 'E'
labels[6] = 'H'
labels[7] = 'P'
labels[8] = 'T'
ax.set_xticks(locs)
ax.set_xticklabels(labels)
gotTOHs = np.where(TOHbun != 'No')[0]
Hgot1s = np.where(np.char.strip(TOHbun) == '1')[0]
Hgot2s = np.where(np.char.strip(TOHbun) == '2')[0]
Hgot3s = np.where(np.char.strip(TOHbun) == '3')[0]
HgotCs = np.where(np.char.strip(TOHbun) == 'C')[0]
HgotEs = np.where(np.char.strip(TOHbun) == 'E')[0]
HgotHs = np.where(np.char.strip(TOHbun) == 'H')[0]
HgotPs = np.where(np.char.strip(TOHbun) == 'P')[0]
HgotTs = np.where(np.char.strip(TOHbun) == 'T')[0]
Hgot1spct = 0.
Hgot2spct = 0.
Hgot3spct = 0.
HgotCspct = 0.
HgotEspct = 0.
HgotHspct = 0.
HgotPspct = 0.
HgotTspct = 0.
Hgotspct = 0.
if (nobs > 0):
Hgotspct = (len(gotTOHs)/float(nobs))*100
if (len(Hgot1s) > 0):
Hgot1spct = (len(Hgot1s)/float(len(gotTOHs)))*100
plt.scatter(np.repeat(1,len(Hgot1s)),LATbun[Hgot1s],c='grey',marker='o',linewidth=0.,s=12)
if (len(Hgot2s) > 0):
Hgot2spct = (len(Hgot2s)/float(len(gotTOHs)))*100
plt.scatter(np.repeat(2,len(Hgot2s)),LATbun[Hgot2s],c='red',marker='o',linewidth=0.,s=12)
if (len(Hgot3s) > 0):
Hgot3spct = (len(Hgot3s)/float(len(gotTOHs)))*100
plt.scatter(np.repeat(3,len(Hgot3s)),LATbun[Hgot3s],c='orange',marker='o',linewidth=0.,s=12)
if (len(HgotCs) > 0):
HgotCspct = (len(HgotCs)/float(len(gotTOHs)))*100
plt.scatter(np.repeat(4,len(HgotCs)),LATbun[HgotCs],c='gold',marker='o',linewidth=0.,s=12)
if (len(HgotEs) > 0):
HgotEspct = (len(HgotEs)/float(len(gotTOHs)))*100
plt.scatter(np.repeat(5,len(HgotEs)),LATbun[HgotEs],c='green',marker='o',linewidth=0.,s=12)
if (len(HgotHs) > 0):
HgotHspct = (len(HgotHs)/float(len(gotTOHs)))*100
plt.scatter(np.repeat(6,len(HgotHs)),LATbun[HgotHs],c='blue',marker='o',linewidth=0.,s=12)
if (len(HgotPs) > 0):
HgotPspct = (len(HgotPs)/float(len(gotTOHs)))*100
plt.scatter(np.repeat(7,len(HgotPs)),LATbun[HgotPs],c='indigo',marker='o',linewidth=0.,s=12)
if (len(HgotTs) > 0):
HgotTspct = (len(HgotTs)/float(len(gotTOHs)))*100
plt.scatter(np.repeat(8,len(HgotTs)),LATbun[HgotTs],c='violet',marker='o',linewidth=0.,s=12)
plt.annotate('TOH: '+str(len(gotTOHs))+' ('+"{:5.2f}".format(Hgotspct)+'%)',xy=(0.05,1.21),xycoords='axes fraction',size=12,color='black')
plt.annotate('1: '+str(len(Hgot1s))+' ('+"{:5.2f}".format(Hgot1spct)+'%)',xy=(0.05,1.16),xycoords='axes fraction',size=12,color='grey')
plt.annotate('2: '+str(len(Hgot2s))+' ('+"{:5.2f}".format(Hgot2spct)+'%)',xy=(0.05,1.11),xycoords='axes fraction',size=12,color='red')
plt.annotate('3: '+str(len(Hgot3s))+' ('+"{:5.2f}".format(Hgot3spct)+'%)',xy=(0.05,1.06),xycoords='axes fraction',size=12,color='orange')
plt.annotate('C: '+str(len(HgotCs))+' ('+"{:5.2f}".format(HgotCspct)+'%)',xy=(0.05,1.01),xycoords='axes fraction',size=12,color='gold')
plt.annotate('E: '+str(len(HgotEs))+' ('+"{:5.2f}".format(HgotEspct)+'%)',xy=(0.55,1.16),xycoords='axes fraction',size=12,color='green')
plt.annotate('H: '+str(len(HgotHs))+' ('+"{:5.2f}".format(HgotHspct)+'%)',xy=(0.55,1.11),xycoords='axes fraction',size=12,color='blue')
plt.annotate('P: '+str(len(HgotPs))+' ('+"{:5.2f}".format(HgotPspct)+'%)',xy=(0.55,1.06),xycoords='axes fraction',size=12,color='indigo')
plt.annotate('T: '+str(len(HgotTs))+' ('+"{:5.2f}".format(HgotTspct)+'%)',xy=(0.55,1.01),xycoords='axes fraction',size=12,color='violet')
#plt.tight_layout()
# plt.savefig(OUTDIR+OutTypeFil+".eps")
plt.savefig(OUTDIR+OutTypeFil+".png")
# Write out stats to file (append!)
filee=open(OUTDIR+OutTypeText,'a+')
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(nobs)+\
' TOH: '+'{:8d}'.format(len(gotTOHs))+' ('+"{:5.2f}".format(Hgotspct)+\
'%) 1: '+'{:8d}'.format(len(Hgot1s))+' ('+"{:5.2f}".format(Hgot1spct)+\
'%) 2: '+'{:8d}'.format(len(Hgot2s))+' ('+"{:5.2f}".format(Hgot2spct)+\
'%) 3: '+'{:8d}'.format(len(Hgot3s))+' ('+"{:5.2f}".format(Hgot3spct)+\
'%) C: '+'{:8d}'.format(len(HgotCs))+' ('+"{:5.2f}".format(HgotCspct)+\
'%) E: '+'{:8d}'.format(len(HgotEs))+' ('+"{:5.2f}".format(HgotEspct)+\
'%) H: '+'{:8d}'.format(len(HgotHs))+' ('+"{:5.2f}".format(HgotHspct)+\
'%) P: '+'{:8d}'.format(len(HgotPs))+' ('+"{:5.2f}".format(HgotPspct)+\
'%) T: '+'{:8d}'.format(len(HgotTs))+' ('+"{:5.2f}".format(HgotTspct)+\
'%)\n'))
filee.close()
# pdb.set_trace()
# - plots, EOT/EOH by latitude where 1 = none, 2 = aspirated (A), 3 = whirled (SG/SL/W), 4 = screened (S/SN/VS), 5 = unscreened (US)
# - prints, number and % of obs with EOT and EOH present, and in the categories
plt.clf()
fig=plt.figure(figsize=(6,8))
plt1=plt.axes([0.1,0.1,0.85,0.7])
plt.xlim(0,6)
plt.ylim(-91,91)
plt.xlabel('Exposure Category')
plt.ylabel('Latitude')
gotEOTs = np.where(EOTbun != 'Non')[0]
Tgot1s = np.where(EOTbun == 'Non')[0]
Tgot2s = np.where((EOTbun == 'A '))[0]
Tgot3s = np.where((EOTbun == 'SG ') | (EOTbun == 'SL ') | (EOTbun == 'W '))[0]
Tgot4s = np.where((EOTbun == 'S ') | (EOTbun == 'SN ') | (EOTbun == 'VS '))[0]
Tgot5s = np.where(EOTbun == 'US ')[0]
pctTgots = 0.
pctTgot2s = 0.
pctTgot3s = 0.
pctTgot4s = 0.
pctTgot5s = 0.
if (nobs > 0):
pctTgots = (len(gotEOTs)/float(nobs))*100
if (len(Tgot2s) > 0):
pctTgot2s = (len(Tgot2s)/float(len(gotEOTs)))*100
if (len(Tgot3s) > 0):
pctTgot3s = (len(Tgot3s)/float(len(gotEOTs)))*100
if (len(Tgot4s) > 0):
pctTgot4s = (len(Tgot4s)/float(len(gotEOTs)))*100
if (len(Tgot5s) > 0):
pctTgot5s = (len(Tgot5s)/float(len(gotEOTs)))*100
plt.scatter(np.repeat(0.9,len(Tgot1s)),LATbun[Tgot1s],c='grey',marker='o',linewidth=0.,s=12)
plt.scatter(np.repeat(1.9,len(Tgot2s)),LATbun[Tgot2s],c='red',marker='o',linewidth=0.,s=12)
plt.scatter(np.repeat(2.9,len(Tgot3s)),LATbun[Tgot3s],c='orange',marker='o',linewidth=0.,s=12)
plt.scatter(np.repeat(3.9,len(Tgot4s)),LATbun[Tgot4s],c='blue',marker='o',linewidth=0.,s=12)
plt.scatter(np.repeat(4.9,len(Tgot5s)),LATbun[Tgot5s],c='violet',marker='o',linewidth=0.,s=12)
#plt.annotate('EOT: '+str(len(gotEOTs))+' ('+"{:5.2f}".format((len(gotEOTs)/float(nobs))*100)+'%)',xy=(0.55,0.94),xycoords='axes fraction',size=10)
#plt.annotate('A/VS(2): '+str(len(Tgot2s))+' ('+"{:5.2f}".format((len(Tgot2s)/float(len(gotEOTs)))*100)+'%)',xy=(0.55,0.90),xycoords='axes fraction',size=10)
#plt.annotate('SG/SL/W(3): '+str(len(Tgot3s))+' ('+"{:5.2f}".format((len(Tgot3s)/float(len(gotEOTs)))*100)+'%)',xy=(0.55,0.86),xycoords='axes fraction',size=10)
#plt.annotate('S/SN(4): '+str(len(Tgot4s))+' ('+"{:5.2f}".format((len(Tgot4s)/float(len(gotEOTs)))*100)+'%)',xy=(0.55,0.82),xycoords='axes fraction',size=10)
#plt.annotate('US(5): '+str(len(Tgot5s))+' ('+"{:5.2f}".format((len(Tgot5s)/float(len(gotEOTs)))*100)+'%)',xy=(0.55,0.78),xycoords='axes fraction',size=10)
plt.annotate('EOT: '+str(len(gotEOTs))+' ('+"{:5.2f}".format(pctTgots)+'%)',xy=(0.05,1.21),xycoords='axes fraction',size=12,color='grey')
plt.annotate('A: '+str(len(Tgot2s))+' ('+"{:5.2f}".format(pctTgot2s)+'%)',xy=(0.05,1.16),xycoords='axes fraction',size=12,color='red')
plt.annotate('SG/SL/W: '+str(len(Tgot3s))+' ('+"{:5.2f}".format(pctTgot3s)+'%)',xy=(0.05,1.11),xycoords='axes fraction',size=12,color='orange')
plt.annotate('S/SN/VS: '+str(len(Tgot4s))+' ('+"{:5.2f}".format(pctTgot4s)+'%)',xy=(0.05,1.06),xycoords='axes fraction',size=12,color='blue')
plt.annotate('US: '+str(len(Tgot5s))+' ('+"{:5.2f}".format(pctTgot5s)+'%)',xy=(0.05,1.01),xycoords='axes fraction',size=12,color='violet')
gotEOHs = np.where(EOHbun != 'Non')[0]
Hgot1s = np.where(EOHbun == 'Non')[0]
Hgot2s = np.where((EOHbun == 'A '))[0]
Hgot3s = np.where((EOHbun == 'SG ') | (EOHbun == 'SL ') | (EOHbun == 'W '))[0]
Hgot4s = np.where((EOHbun == 'S ') | (EOHbun == 'SN ') | (EOHbun == 'VS '))[0]
Hgot5s = np.where(EOHbun == 'US ')[0]
pctHgots = 0.
pctHgot2s = 0.
pctHgot3s = 0.
pctHgot4s = 0.
pctHgot5s = 0.
if (nobs > 0):
pctHgots = (len(gotEOHs)/float(nobs))*100
if (len(Hgot2s) > 0):
pctHgot2s = (len(Hgot2s)/float(len(gotEOHs)))*100
if (len(Hgot3s) > 0):
pctHgot3s = (len(Hgot3s)/float(len(gotEOHs)))*100
if (len(Hgot4s) > 0):
pctHgot4s = (len(Hgot4s)/float(len(gotEOHs)))*100
if (len(Hgot5s) > 0):
pctHgot5s = (len(Hgot5s)/float(len(gotEOHs)))*100
plt.scatter(np.repeat(1.1,len(Hgot1s)),LATbun[Hgot1s],c='grey',marker='o',linewidth=0.,s=12)
plt.scatter(np.repeat(2.1,len(Hgot2s)),LATbun[Hgot2s],c='red',marker='o',linewidth=0.,s=12)
plt.scatter(np.repeat(3.1,len(Hgot3s)),LATbun[Hgot3s],c='orange',marker='o',linewidth=0.,s=12)
plt.scatter(np.repeat(4.1,len(Hgot4s)),LATbun[Hgot4s],c='blue',marker='o',linewidth=0.,s=12)
plt.scatter(np.repeat(5.1,len(Hgot5s)),LATbun[Hgot5s],c='violet',marker='o',linewidth=0.,s=12)
#plt.annotate('EOH: '+str(len(gotEOHs))+' ('+"{:5.2f}".format((len(gotEOHs)/float(nobs))*100)+'%)',xy=(0.55,0.74),xycoords='axes fraction',size=10)
#plt.annotate('A/VS(2): '+str(len(Hgot2s))+' ('+"{:5.2f}".format((len(Hgot2s)/float(len(gotEOHs)))*100)+'%)',xy=(0.55,0.70),xycoords='axes fraction',size=10)
#plt.annotate('SG/SL/W(3): '+str(len(Hgot3s))+' ('+"{:5.2f}".format((len(Hgot3s)/float(len(gotEOHs)))*100)+'%)',xy=(0.55,0.66),xycoords='axes fraction',size=10)
#plt.annotate('S/SN(4): '+str(len(Hgot4s))+' ('+"{:5.2f}".format((len(Hgot4s)/float(len(gotEOHs)))*100)+'%)',xy=(0.55,0.62),xycoords='axes fraction',size=10)
#plt.annotate('US(5): '+str(len(Hgot5s))+' ('+"{:5.2f}".format((len(Hgot5s)/float(len(gotEOHs)))*100)+'%)',xy=(0.55,0.58),xycoords='axes fraction',size=10)
plt.annotate('EOH: '+str(len(gotEOHs))+' ('+"{:5.2f}".format(pctHgots)+'%)',xy=(0.55,1.21),xycoords='axes fraction',size=12,color='grey')
plt.annotate('A: '+str(len(Hgot2s))+' ('+"{:5.2f}".format(pctHgot2s)+'%)',xy=(0.55,1.16),xycoords='axes fraction',size=12,color='red')
plt.annotate('SG/SL/W: '+str(len(Hgot3s))+' ('+"{:5.2f}".format(pctHgot3s)+'%)',xy=(0.55,1.11),xycoords='axes fraction',size=12,color='orange')
plt.annotate('S/SN/VS: '+str(len(Hgot4s))+' ('+"{:5.2f}".format(pctHgot4s)+'%)',xy=(0.55,1.06),xycoords='axes fraction',size=12,color='blue')
plt.annotate('US: '+str(len(Hgot5s))+' ('+"{:5.2f}".format(pctHgot5s)+'%)',xy=(0.55,1.01),xycoords='axes fraction',size=12,color='violet')
#plt.annotate('a)',xy=(0.03,0.94),xycoords='axes fraction',size=12)
# plt.savefig(OUTDIR+OutInstrFil+".eps")
plt.savefig(OUTDIR+OutInstrFil+".png")
# Write out stats to file (append!)
filee=open(OUTDIR+OutInstrumentText,'a+')
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(nobs)+\
' EOH: '+'{:8d}'.format(len(gotEOHs))+' ('+"{:5.2f}".format(pctHgots)+\
'%) A: '+'{:8d}'.format(len(Hgot2s))+' ('+"{:5.2f}".format(pctHgot2s)+\
'%) SG/SL/W: '+'{:8d}'.format(len(Hgot3s))+' ('+"{:5.2f}".format(pctHgot3s)+\
'%) S/SN/VS: '+'{:8d}'.format(len(Hgot4s))+' ('+"{:5.2f}".format(pctHgot4s)+\
'%) US: '+'{:8d}'.format(len(Hgot5s))+' ('+"{:5.2f}".format(pctHgot5s)+\
'%) EOT: '+'{:8d}'.format(len(gotEOTs))+' ('+"{:5.2f}".format(pctTgots)+\
'%) A: '+'{:8d}'.format(len(Tgot2s))+' ('+"{:5.2f}".format(pctTgot2s)+\
'%) SG/SL/W: '+'{:8d}'.format(len(Tgot3s))+' ('+"{:5.2f}".format(pctTgot3s)+\
'%) S/SN/VS: '+'{:8d}'.format(len(Tgot4s))+' ('+"{:5.2f}".format(pctTgot4s)+\
'%) US: '+'{:8d}'.format(len(Tgot5s))+' ('+"{:5.2f}".format(pctTgot5s)+\
'%)\n'))
filee.close()
xpos=[0.1, 0.6,0.1,0.6,0.1,0.6]
ypos=[0.7,0.7,0.37,0.37,0.04,0.04]
xfat=[0.37,0.37,0.37,0.37,0.37,0.37]
ytall=[0.28,0.28,0.28,0.28,0.28,0.28]
plt.clf()
f,axarr=plt.subplots(6,figsize=(10,12),sharex=False) #6,18
# - plots, HOB, HOT, HOA, HOP, LOV (second axis?) by latitude
# - prints, number and % of obs with HOB, HOT, HOA, HOP and LOV
axarr[0].set_position([xpos[0],ypos[0],xfat[0],ytall[0]])
axarr[0].set_xlim(0,60)
axarr[0].set_ylim(-91,91)
axarr[0].set_xlabel('Height (m)/Length (m/10.)')
axarr[0].set_ylabel('Latitude')
pctHOBs = 0.
pctHOTs = 0.
pctHOAs = 0.
pctHOPs = 0.
pctLOVs = 0.
gotHOBs = np.where(HOBbun > 0)[0]
if (len(gotHOBs) > 0):
axarr[0].scatter(HOBbun[gotHOBs]+0.,LATbun[gotHOBs],c='grey',marker='o',linewidth=0.,s=1)
pctHOBs = (len(gotHOBs)/float(nobs))*100
gotHOTs = np.where(HOTbun > 0)[0]
if (len(gotHOTs) > 0):
axarr[0].scatter(HOTbun[gotHOTs]+0.1,LATbun[gotHOTs],c='red',marker='o',linewidth=0.,s=1)
pctHOTs = (len(gotHOTs)/float(nobs))*100
gotHOAs = np.where(HOAbun > 0)[0]
if (len(gotHOAs) > 0):
axarr[0].scatter(HOAbun[gotHOAs]+0.2,LATbun[gotHOAs],c='orange',marker='o',linewidth=0.,s=1)
pctHOAs = (len(gotHOAs)/float(nobs))*100
gotHOPs = np.where(HOPbun > 0)[0]
if (len(gotHOPs) > 0):
axarr[0].scatter(HOPbun[gotHOPs]+0.3,LATbun[gotHOPs],c='blue',marker='o',linewidth=0.,s=1)
pctHOPs = (len(gotHOPs)/float(nobs))*100
gotLOVs = np.where(LOVbun > 0)[0]
if (len(gotLOVs) > 0):
axarr[0].scatter((LOVbun[gotLOVs]/10.)+0.4,LATbun[gotLOVs],c='violet',marker='o',linewidth=0.,s=1)
pctLOVs = (len(gotLOVs)/float(nobs))*100
axarr[0].annotate('a)',xy=(0.03,0.94),xycoords='axes fraction',size=12)
#axarr[0].annotate('HOB: '+str(len(gotHOBs))+' ('+"{:5.2f}".format(pctHOBs)+'%)',xy=(0.5,0.18),xycoords='axes fraction',size=10,color='grey')
#axarr[0].annotate('HOT: '+str(len(gotHOTs))+' ('+"{:5.2f}".format(pctHOTs)+'%)',xy=(0.5,0.14),xycoords='axes fraction',size=10,color='red')
#axarr[0].annotate('HOA: '+str(len(gotHOAs))+' ('+"{:5.2f}".format(pctHOAs)+'%)',xy=(0.5,0.1),xycoords='axes fraction',size=10,color='orange')
#axarr[0].annotate('HOP: '+str(len(gotHOPs))+' ('+"{:5.2f}".format(pctHOPs)+'%)',xy=(0.5,0.06),xycoords='axes fraction',size=10,color='blue')
#axarr[0].annotate('LOV: '+str(len(gotLOVs))+' ('+"{:5.2f}".format(pctLOVs)+'%)',xy=(0.5,0.02),xycoords='axes fraction',size=10,color='violet')
axarr[0].annotate('HOB: '+"{:5.2f}".format(pctHOBs)+'%',xy=(0.6,0.18),xycoords='axes fraction',size=10,color='grey')
axarr[0].annotate('HOT: '+"{:5.2f}".format(pctHOTs)+'%',xy=(0.6,0.14),xycoords='axes fraction',size=10,color='red')
axarr[0].annotate('HOA: '+"{:5.2f}".format(pctHOAs)+'%',xy=(0.6,0.1),xycoords='axes fraction',size=10,color='orange')
axarr[0].annotate('HOP: '+"{:5.2f}".format(pctHOPs)+'%',xy=(0.6,0.06),xycoords='axes fraction',size=10,color='blue')
axarr[0].annotate('LOV: '+"{:5.2f}".format(pctLOVs)+'%',xy=(0.6,0.02),xycoords='axes fraction',size=10,color='violet')
# - plots histogram HOB, HOT, HOA, HOP, LOV (second axis?
# - prints, mean and sd of HOB, HOT, HOA, HOP and LOV
axarr[1].set_position([xpos[1],ypos[1],xfat[1],ytall[1]])
axarr[1].set_xlim(0,60)
#axarr[1].set_ylim(0,5000) # let it do its own thing here
axarr[1].set_xlabel('Height/Length (m)')
axarr[1].set_ylabel('Frequency')
binsies = np.arange(0,61,1) # 61 bin edges from 0 to 60 m, giving 60 one-metre-wide bins
meanHOBs = -99.9
meanHOTs = -99.9
meanHOAs = -99.9
meanHOPs = -99.9
meanLOVs = -99.9
sdHOBs = -99.9
sdHOTs = -99.9
sdHOAs = -99.9
sdHOPs = -99.9
sdLOVs = -99.9
if (len(gotHOBs) > 0):
HOBhist = np.histogram(HOBbun[gotHOBs],binsies) # np.histogram returns a (counts, bin_edges) pair: 60 counts and 61 edges here
axarr[1].plot(HOBhist[1][0:60]+0.5,HOBhist[0],c='grey')
meanHOBs = np.mean(HOBbun[gotHOBs])
sdHOBs = np.std(HOBbun[gotHOBs])
axarr[1].annotate('HOB: '+"{:5.1f}".format(meanHOBs)+', '+"{:5.1f}".format(sdHOBs),xy=(0.6,0.94),xycoords='axes fraction',size=10,color='grey')
if (len(gotHOTs) > 0):
HOThist = np.histogram(HOTbun[gotHOTs],binsies) # as above: (counts, bin_edges)
axarr[1].plot(HOThist[1][0:60]+0.5,HOThist[0],c='red')
meanHOTs = np.mean(HOTbun[gotHOTs])
sdHOTs = np.std(HOTbun[gotHOTs])
axarr[1].annotate('HOT: '+"{:5.1f}".format(meanHOTs)+', '+"{:5.1f}".format(sdHOTs),xy=(0.6,0.90),xycoords='axes fraction',size=10,color='red')
if (len(gotHOAs) > 0):
HOAhist = np.histogram(HOAbun[gotHOAs],binsies) # as above: (counts, bin_edges)
axarr[1].plot(HOAhist[1][0:60]+0.5,HOAhist[0],c='orange')
meanHOAs = np.mean(HOAbun[gotHOAs])
sdHOAs = np.std(HOAbun[gotHOAs])
axarr[1].annotate('HOA: '+"{:5.1f}".format(meanHOAs)+', '+"{:5.1f}".format(sdHOAs),xy=(0.6,0.86),xycoords='axes fraction',size=10,color='orange')
if (len(gotHOPs) > 0):
HOPhist = np.histogram(HOPbun[gotHOPs],binsies) # as above: (counts, bin_edges)
axarr[1].plot(HOPhist[1][0:60]+0.5,HOPhist[0],c='blue')
meanHOPs = np.mean(HOPbun[gotHOPs])
sdHOPs = np.std(HOPbun[gotHOPs])
axarr[1].annotate('HOP: '+"{:5.1f}".format(meanHOPs)+', '+"{:5.1f}".format(sdHOPs),xy=(0.6,0.82),xycoords='axes fraction',size=10,color='blue')
if (len(gotLOVs) > 0):
LOVhist = np.histogram(LOVbun[gotLOVs]/10.,binsies) # as above: (counts, bin_edges)
axarr[1].plot(LOVhist[1][0:60]+0.5,LOVhist[0],c='violet')
meanLOVs = np.mean(LOVbun[gotLOVs])
sdLOVs = np.std(LOVbun[gotLOVs])
axarr[1].annotate('LOV: '+"{:5.1f}".format(meanLOVs)+', '+"{:5.1f}".format(sdLOVs),xy=(0.6,0.78),xycoords='axes fraction',size=10,color='violet')
axarr[1].annotate('b)',xy=(0.03,0.94),xycoords='axes fraction',size=12)
# - plots, HOA vs HOP, HOA vs HOT, HOA vs HOB, HOP vs HOB, HOP vs HOT with lines of best fit
# - prints, number and % where HOA and HOP present, HOA and HOT present, HOA and HOB present, HOP and HOB present, HOP and HOT present, print equation for fit
axarr[2].set_position([xpos[2],ypos[2],xfat[2],ytall[2]])
axarr[2].set_xlim(0,60)
axarr[2].set_ylim(0,60)
axarr[2].set_ylabel('Thermometer/Barometer Height (m)')
axarr[2].set_xlabel('Anemometer/Visual Obs Platform Height (m)')
pctHOAPs = 0.
pctHOABs = 0.
pctHOATs = 0.
pctHOPBs = 0.
pctHOPTs = 0.
fitsAP = [-99.9,-99.9]
fitsAB = [-99.9,-99.9]
fitsAT = [-99.9,-99.9]
fitsPB = [-99.9,-99.9]
fitsPT = [-99.9,-99.9]
gotHOAPs = np.where((HOAbun > 0) & (HOPbun > 0))[0]
if (len(gotHOAPs) > 0):
axarr[2].scatter(HOAbun[gotHOAPs],HOPbun[gotHOAPs],c='black',marker='o',linewidth=0.,s=2)
fitsAP = np.polyfit(HOAbun[gotHOAPs],HOPbun[gotHOAPs],1)
# Get RMSE of residuals from line of best fit
RMSE_AP = np.sqrt(np.mean((HOPbun[gotHOAPs] - (fitsAP[0]*HOAbun[gotHOAPs]+fitsAP[1]))**2))
pctHOAPs = (len(gotHOAPs)/float(nobs))*100
axarr[2].plot(HOAbun[gotHOAPs],fitsAP[0]*HOAbun[gotHOAPs]+fitsAP[1],c='black')
#axarr[2].annotate('HOBHOA: '+str(len(gotHOABs))+' ('+"{:5.2f}".format(pctHOABs)+'%), '+"{:5.2f}".format(fitsAB[0])+', '+"{:5.2f}".format(fitsAB[1])+' ('+"{:6.2f}".format(RMSE_AB)+')',xy=(0.1,0.94),xycoords='axes fraction',size=10,color='grey')
axarr[2].annotate('HOPHOA: '+"{:5.2f}".format(pctHOAPs)+'%, '+"{:5.2f}".format(fitsAP[0])+', '+"{:5.2f}".format(fitsAP[1])+' ('+"{:6.2f}".format(RMSE_AP)+')',xy=(0.1,0.78),xycoords='axes fraction',size=10,color='black')
gotHOABs = np.where((HOAbun > 0) & (HOBbun > 0))[0]
if (len(gotHOABs) > 0):
axarr[2].scatter(HOAbun[gotHOABs],HOBbun[gotHOABs],c='grey',marker='o',linewidth=0.,s=2)
fitsAB = np.polyfit(HOAbun[gotHOABs],HOBbun[gotHOABs],1)
# Get RMSE of residuals from line of best fit
RMSE_AB = np.sqrt(np.mean((HOBbun[gotHOABs] - (fitsAB[0]*HOAbun[gotHOABs]+fitsAB[1]))**2))
pctHOABs = (len(gotHOABs)/float(nobs))*100
axarr[2].plot(HOAbun[gotHOABs],fitsAB[0]*HOAbun[gotHOABs]+fitsAB[1],c='grey')
#axarr[2].annotate('HOBHOA: '+str(len(gotHOABs))+' ('+"{:5.2f}".format(pctHOABs)+'%), '+"{:5.2f}".format(fitsAB[0])+', '+"{:5.2f}".format(fitsAB[1])+' ('+"{:6.2f}".format(RMSE_AB)+')',xy=(0.1,0.94),xycoords='axes fraction',size=10,color='grey')
axarr[2].annotate('HOBHOA: '+"{:5.2f}".format(pctHOABs)+'%, '+"{:5.2f}".format(fitsAB[0])+', '+"{:5.2f}".format(fitsAB[1])+' ('+"{:6.2f}".format(RMSE_AB)+')',xy=(0.1,0.94),xycoords='axes fraction',size=10,color='grey')
gotHOATs = np.where((HOAbun > 0) & (HOTbun > 0))[0]
if (len(gotHOATs) > 0):
axarr[2].scatter(HOAbun[gotHOATs],HOTbun[gotHOATs],c='red',marker='o',linewidth=0.,s=2)
fitsAT = np.polyfit(HOAbun[gotHOATs],HOTbun[gotHOATs],1)
RMSE_AT = np.sqrt(np.mean((HOTbun[gotHOATs] - (fitsAT[0]*HOAbun[gotHOATs]+fitsAT[1]))**2))
pctHOATs = (len(gotHOATs)/float(nobs))*100
axarr[2].plot(HOAbun[gotHOATs],fitsAT[0]*HOAbun[gotHOATs]+fitsAT[1],c='red')
#axarr[2].annotate('HOTHOA: '+str(len(gotHOATs))+' ('+"{:5.2f}".format(pctHOATs)+'%), '+"{:5.2f}".format(fitsAT[0])+', '+"{:5.2f}".format(fitsAT[1])+' ('+"{:6.2f}".format(RMSE_AT)+')',xy=(0.1,0.90),xycoords='axes fraction',size=10,color='red')
axarr[2].annotate('HOTHOA: '+"{:5.2f}".format(pctHOATs)+'%, '+"{:5.2f}".format(fitsAT[0])+', '+"{:5.2f}".format(fitsAT[1])+' ('+"{:6.2f}".format(RMSE_AT)+')',xy=(0.1,0.90),xycoords='axes fraction',size=10,color='red')
gotHOPBs = np.where((HOPbun > 0) & (HOBbun > 0))[0]
if (len(gotHOPBs) > 0):
axarr[2].scatter(HOPbun[gotHOPBs],HOBbun[gotHOPBs],c='orange',marker='o',linewidth=0.,s=2)
fitsPB = np.polyfit(HOPbun[gotHOPBs],HOBbun[gotHOPBs],1)
RMSE_PB = np.sqrt(np.mean((HOBbun[gotHOPBs] - (fitsPB[0]*HOPbun[gotHOPBs]+fitsPB[1]))**2))
pctHOPBs = (len(gotHOPBs)/float(nobs))*100
axarr[2].plot(HOPbun[gotHOPBs],fitsPB[0]*HOPbun[gotHOPBs]+fitsPB[1],c='orange')
#axarr[2].annotate('HOBHOP: '+str(len(gotHOPBs))+' ('+"{:5.2f}".format(pctHOPBs)+'%), '+"{:5.2f}".format(fitsPB[0])+', '+"{:5.2f}".format(fitsPB[1])+' ('+"{:6.2f}".format(RMSE_PB)+')',xy=(0.1,0.86),xycoords='axes fraction',size=10,color='orange')
axarr[2].annotate('HOBHOP: '+"{:5.2f}".format(pctHOPBs)+'%, '+"{:5.2f}".format(fitsPB[0])+', '+"{:5.2f}".format(fitsPB[1])+' ('+"{:6.2f}".format(RMSE_PB)+')',xy=(0.1,0.86),xycoords='axes fraction',size=10,color='orange')
gotHOPTs = np.where((HOPbun > 0) & (HOTbun > 0))[0]
if (len(gotHOPTs) > 0):
axarr[2].scatter(HOPbun[gotHOPTs],HOTbun[gotHOPTs],c='blue',marker='o',linewidth=0.,s=2)
fitsPT = np.polyfit(HOPbun[gotHOPTs],HOTbun[gotHOPTs],1)
RMSE_PT = np.sqrt(np.mean((HOTbun[gotHOPTs] - (fitsPT[0]*HOPbun[gotHOPTs]+fitsPT[1]))**2))
pctHOPTs = (len(gotHOPTs)/float(nobs))*100
axarr[2].plot(HOPbun[gotHOPTs],fitsPT[0]*HOPbun[gotHOPTs]+fitsPT[1],c='blue')
#axarr[2].annotate('HOTHOP: '+str(len(gotHOPTs))+' ('+"{:5.2f}".format(pctHOPTs)+'%), '+"{:5.2f}".format(fitsPT[0])+', '+"{:5.2f}".format(fitsPT[1])+' ('+"{:6.2f}".format(RMSE_PT)+')',xy=(0.1,0.82),xycoords='axes fraction',size=10,color='blue')
axarr[2].annotate('HOTHOP: '+"{:5.2f}".format(pctHOPTs)+'%, '+"{:5.2f}".format(fitsPT[0])+', '+"{:5.2f}".format(fitsPT[1])+' ('+"{:6.2f}".format(RMSE_PT)+')',xy=(0.1,0.82),xycoords='axes fraction',size=10,color='blue')
axarr[2].annotate('c)',xy=(0.03,0.94),xycoords='axes fraction',size=12)
# - plots differences HOA - HOT, HOA - HOB, HOP - HOB, HOP - HOT with lines of best fit
# - prints, mean and std of difference HOA and HOT present, HOA and HOB present, HOP and HOB present, HOP and HOT present, print equation for fit
axarr[3].set_position([xpos[3],ypos[3],xfat[3],ytall[3]])
axarr[3].set_xlim(0,60)
axarr[3].set_ylim(0,60)
axarr[3].set_xlabel('Anemometer/Visual Obs Platform Height (m)')
#axarr[3].set_xlabel('Thermometer/Barometer Height (m)')
axarr[3].set_ylabel('Height Difference (m)')
meanHOAPs = -99.9
meanHOABs = -99.9
meanHOATs = -99.9
meanHOPBs = -99.9
meanHOPTs = -99.9
sdHOAPs = -99.9
sdHOABs = -99.9
sdHOATs = -99.9
sdHOPBs = -99.9
sdHOPTs = -99.9
if (len(gotHOAPs) > 0):
axarr[3].scatter(HOAbun[gotHOAPs],HOAbun[gotHOAPs]-HOPbun[gotHOAPs],c='black',marker='o',linewidth=0.,s=2)
meanHOAPs = np.mean(HOAbun[gotHOAPs]-HOPbun[gotHOAPs])
sdHOAPs = np.std(HOAbun[gotHOAPs]-HOPbun[gotHOAPs])
axarr[3].annotate('HOPHOA: '+"{:5.1f}".format(meanHOAPs)+', '+"{:5.1f}".format(sdHOAPs),xy=(0.5,0.78),xycoords='axes fraction',size=10,color='black')
if (len(gotHOABs) > 0):
axarr[3].scatter(HOAbun[gotHOABs],HOAbun[gotHOABs]-HOBbun[gotHOABs],c='grey',marker='o',linewidth=0.,s=2)
meanHOABs = np.mean(HOAbun[gotHOABs]-HOBbun[gotHOABs])
sdHOABs = np.std(HOAbun[gotHOABs]-HOBbun[gotHOABs])
axarr[3].annotate('HOBHOA: '+"{:5.1f}".format(meanHOABs)+', '+"{:5.1f}".format(sdHOABs),xy=(0.5,0.94),xycoords='axes fraction',size=10,color='grey')
if (len(gotHOATs) > 0):
axarr[3].scatter(HOAbun[gotHOATs],HOAbun[gotHOATs]-HOTbun[gotHOATs],c='red',marker='o',linewidth=0.,s=2)
meanHOATs = np.mean(HOAbun[gotHOATs]-HOTbun[gotHOATs])
sdHOATs = np.std(HOAbun[gotHOATs]-HOTbun[gotHOATs])
axarr[3].annotate('HOTHOA: '+"{:5.1f}".format(meanHOATs)+', '+"{:5.1f}".format(sdHOATs),xy=(0.5,0.90),xycoords='axes fraction',size=10,color='red')
if (len(gotHOPBs) > 0):
axarr[3].scatter(HOPbun[gotHOPBs],HOPbun[gotHOPBs]-HOBbun[gotHOPBs],c='orange',marker='o',linewidth=0.,s=2)
meanHOPBs = np.mean(HOPbun[gotHOPBs]-HOBbun[gotHOPBs])
sdHOPBs = np.std(HOPbun[gotHOPBs]-HOBbun[gotHOPBs])
axarr[3].annotate('HOBHOP: '+"{:5.1f}".format(meanHOPBs)+', '+"{:5.1f}".format(sdHOPBs),xy=(0.5,0.86),xycoords='axes fraction',size=10,color='orange')
if (len(gotHOPTs) > 0):
axarr[3].scatter(HOPbun[gotHOPTs],HOPbun[gotHOPTs]-HOTbun[gotHOPTs],c='blue',marker='o',linewidth=0.,s=2)
meanHOPTs = np.mean(HOPbun[gotHOPTs]-HOTbun[gotHOPTs])
sdHOPTs = np.std(HOPbun[gotHOPTs]-HOTbun[gotHOPTs])
axarr[3].annotate('HOTHOP: '+"{:5.1f}".format(meanHOPTs)+', '+"{:5.1f}".format(sdHOPTs),xy=(0.5,0.82),xycoords='axes fraction',size=10,color='blue')
axarr[3].annotate('d)',xy=(0.03,0.94),xycoords='axes fraction',size=12)
# - plots, LOV vs HOT, LOV vs HOB with lines of best fit
# - prints, number and % where LOV and HOB present, where LOV and HOT present and equations for fit
axarr[4].set_position([xpos[4],ypos[4],xfat[4],ytall[4]])
axarr[4].set_ylim(0,60)
axarr[4].set_xlim(0,500)
axarr[4].set_ylabel('Thermometer/Barometer Height (m)')
axarr[4].set_xlabel('Length of Vessel (m)')
pctHOBLs = 0.
pctHOTLs = 0.
fitsLB = [-99.9, -99.9]
fitsLT = [-99.9, -99.9]
gotHOBLs = np.where((LOVbun > 0) & (HOBbun > 0))[0]
if (len(gotHOBLs) > 0):
axarr[4].scatter(LOVbun[gotHOBLs],HOBbun[gotHOBLs],c='grey',marker='o',linewidth=0.,s=1)
fitsLB = np.polyfit(LOVbun[gotHOBLs],HOBbun[gotHOBLs],1)
RMSE_LB = np.sqrt(np.mean((HOBbun[gotHOBLs] - (fitsLB[0]*LOVbun[gotHOBLs]+fitsLB[1]))**2)) # residuals of HOB about the fitted line
pctHOBLs = (len(gotHOBLs)/float(nobs))*100
axarr[4].plot(LOVbun[gotHOBLs],fitsLB[0]*LOVbun[gotHOBLs]+fitsLB[1],c='grey')
#axarr[4].annotate('HOBLOV: '+str(len(gotHOBLs))+' ('+"{:5.2f}".format(pctHOBLs)+'%), '+"{:5.2f}".format(fitsLB[0])+', '+"{:5.2f}".format(fitsLB[1])' ('+"{:6.2f}".format(RMSE_LB)+')',xy=(0.1,0.94),xycoords='axes fraction',size=10,color='grey')
axarr[4].annotate('HOBLOV: '+"{:5.2f}".format(pctHOBLs)+'%, '+"{:5.2f}".format(fitsLB[0])+', '+"{:5.2f}".format(fitsLB[1])+' ('+"{:6.2f}".format(RMSE_LB)+')',xy=(0.1,0.94),xycoords='axes fraction',size=10,color='grey')
gotHOTLs = np.where((LOVbun > 0) & (HOTbun > 0))[0]
if (len(gotHOTLs) > 0):
axarr[4].scatter(LOVbun[gotHOTLs],HOTbun[gotHOTLs],c='red',marker='o',linewidth=0.,s=1)
fitsLT = np.polyfit(LOVbun[gotHOTLs],HOTbun[gotHOTLs],1)
RMSE_LT = np.sqrt(np.mean((HOTbun[gotHOTLs] - (fitsLT[0]*LOVbun[gotHOTLs]+fitsLT[1]))**2)) # residuals of HOT about the fitted line
pctHOTLs = (len(gotHOTLs)/float(nobs))*100
axarr[4].plot(LOVbun[gotHOTLs],fitsLT[0]*LOVbun[gotHOTLs]+fitsLT[1],c='red')
#axarr[4].annotate('HOTLOV: '+str(len(gotHOTLs))+' ('+"{:5.2f}".format(pctHOTLs)+'%), '+"{:5.2f}".format(fitsLT[0])+', '+"{:5.2f}".format(fitsLT[1])' ('+"{:6.2f}".format(RMSE_LT)+')',xy=(0.1,0.90),xycoords='axes fraction',size=10,color='red')
axarr[4].annotate('HOTLOV: '+"{:5.2f}".format(pctHOTLs)+'%, '+"{:5.2f}".format(fitsLT[0])+', '+"{:5.2f}".format(fitsLT[1])+' ('+"{:6.2f}".format(RMSE_LT)+')',xy=(0.1,0.90),xycoords='axes fraction',size=10,color='red')
axarr[4].annotate('e)',xy=(0.03,0.94),xycoords='axes fraction',size=12)
# - plots ratio LOV / HOT, LOV / HOB with lines of best fit
# - prints, mean and std where LOV and HOB present, where LOV and HOT present and equations for fit
axarr[5].set_position([xpos[5],ypos[5],xfat[5],ytall[5]])
axarr[5].set_xlim(0,500)
axarr[5].set_ylim(0,50)
axarr[5].set_xlabel('Length of Vessel (m)')
axarr[5].set_ylabel('Ratio: Vessel Length to Instrument Height')
meanHOBLs = -99.9
meanHOTLs = -99.9
sdHOBLs = -99.9
sdHOTLs = -99.9
if (len(gotHOBLs) > 0):
axarr[5].scatter(LOVbun[gotHOBLs],LOVbun[gotHOBLs]/np.array(HOBbun[gotHOBLs],dtype=float),c='grey',marker='o',linewidth=0.,s=2)
meanHOBLs = np.mean(LOVbun[gotHOBLs]/np.array(HOBbun[gotHOBLs],dtype=float))
sdHOBLs = np.std(LOVbun[gotHOBLs]/np.array(HOBbun[gotHOBLs],dtype=float)) # spread of the ratio, to match the mean above
axarr[5].annotate('HOBLOV: '+"{:5.1f}".format(meanHOBLs)+', '+"{:5.1f}".format(sdHOBLs),xy=(0.5,0.94),xycoords='axes fraction',size=10,color='grey')
if (len(gotHOTLs) > 0):
axarr[5].scatter(LOVbun[gotHOTLs],LOVbun[gotHOTLs]/np.array(HOTbun[gotHOTLs],dtype=float),c='red',marker='o',linewidth=0.,s=2)
meanHOTLs = np.mean(LOVbun[gotHOTLs]/np.array(HOTbun[gotHOTLs],dtype=float))
sdHOTLs = np.std(LOVbun[gotHOTLs]/np.array(HOTbun[gotHOTLs],dtype=float)) # spread of the ratio, to match the mean above
axarr[5].annotate('HOTLOV: '+"{:5.1f}".format(meanHOTLs)+', '+"{:5.1f}".format(sdHOTLs),xy=(0.5,0.90),xycoords='axes fraction',size=10,color='red')
axarr[5].annotate('f)',xy=(0.03,0.94),xycoords='axes fraction',size=12)
# save plots as eps and png
# plt.savefig(OUTDIR+OutHeightFil+".eps")
plt.savefig(OUTDIR+OutHeightFil+".png")
# Write out stats to file (append!)
filee=open(OUTDIR+OutHeightText,'a+')
print(fitsAB)
filee.write(str(year1+' '+year2+' '+month1+' '+month2+' NOBS: '+'{:8d}'.format(nobs)+\
' HOB: '+'{:8d}'.format(len(gotHOBs))+' ('+"{:5.2f}".format(pctHOBs)+'%) '+"{:5.1f}".format(meanHOBs)+' '+"{:5.1f}".format(sdHOBs)+\
' HOT: '+'{:8d}'.format(len(gotHOTs))+' ('+"{:5.2f}".format(pctHOTs)+'%) '+"{:5.1f}".format(meanHOTs)+' '+"{:5.1f}".format(sdHOTs)+\
' HOA: '+'{:8d}'.format(len(gotHOAs))+' ('+"{:5.2f}".format(pctHOAs)+'%) '+"{:5.1f}".format(meanHOAs)+' '+"{:5.1f}".format(sdHOAs)+\
' HOP: '+'{:8d}'.format(len(gotHOPs))+' ('+"{:5.2f}".format(pctHOPs)+'%) '+"{:5.1f}".format(meanHOPs)+' '+"{:5.1f}".format(sdHOPs)+\
' LOV: '+'{:8d}'.format(len(gotLOVs))+' ('+"{:5.2f}".format(pctLOVs)+'%) '+"{:5.1f}".format(meanLOVs)+' '+"{:5.1f}".format(sdLOVs)+\
' HOPHOA: '+'{:8d}'.format(len(gotHOAPs))+' ('+"{:5.2f}".format(pctHOAPs)+'%) '+\
"{:6.2f}".format(fitsAP[0])+' '+"{:6.2f}".format(fitsAP[1])+' '+\
"{:5.1f}".format(meanHOAPs)+' '+"{:5.1f}".format(sdHOAPs)+\
' HOBHOA: '+'{:8d}'.format(len(gotHOABs))+' ('+"{:5.2f}".format(pctHOABs)+'%) '+\
"{:6.2f}".format(fitsAB[0])+' '+"{:6.2f}".format(fitsAB[1])+' '+\
"{:5.1f}".format(meanHOABs)+' '+"{:5.1f}".format(sdHOABs)+\
' HOTHOA: '+'{:8d}'.format(len(gotHOATs))+' ('+"{:5.2f}".format(pctHOATs)+'%) '+\
"{:6.2f}".format(fitsAT[0])+' '+"{:6.2f}".format(fitsAT[1])+' '+\
"{:5.1f}".format(meanHOATs)+' '+"{:5.1f}".format(sdHOATs)+\
' HOBHOP: '+'{:8d}'.format(len(gotHOPBs))+' ('+"{:5.2f}".format(pctHOPBs)+'%) '+\
"{:6.2f}".format(fitsPB[0])+' '+"{:6.2f}".format(fitsPB[1])+' '+\
"{:5.1f}".format(meanHOPBs)+' '+"{:5.1f}".format(sdHOPBs)+\
' HOTHOP: '+'{:8d}'.format(len(gotHOPTs))+' ('+"{:5.2f}".format(pctHOPTs)+'%) '+\
"{:6.2f}".format(fitsPT[0])+' '+"{:6.2f}".format(fitsPT[1])+' '+\
"{:5.1f}".format(meanHOPTs)+' '+"{:5.1f}".format(sdHOPTs)+\
' HOBLOV: '+'{:8d}'.format(len(gotHOBLs))+' ('+"{:5.2f}".format(pctHOBLs)+'%) '+\
"{:6.2f}".format(fitsLB[0])+' '+"{:6.2f}".format(fitsLB[1])+' '+\
"{:5.1f}".format(meanHOBLs)+' '+"{:5.1f}".format(sdHOBLs)+\
' HOTLOV: '+'{:8d}'.format(len(gotHOTLs))+' ('+"{:5.2f}".format(pctHOTLs)+'%) '+\
"{:6.2f}".format(fitsLT[0])+' '+"{:6.2f}".format(fitsLT[1])+' '+\
"{:5.1f}".format(meanHOTLs)+' '+"{:5.1f}".format(sdHOTLs)+\
'\n'))
filee.close()
#pdb.set_trace()
if __name__ == '__main__':
main(sys.argv[1:])
#************************************************************************
| cc0-1.0 |
togawa28/mousestyles | doc/source/_sphinxext/plot_directive.py | 3 | 28483 | """
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` option is
specified, the context is reset for this and future plots, and
previous figures are closed prior to running the code.
``:context:close-figs`` keeps the context but closes previous figures
before running the code.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen. When passing from
the command line through sphinx_build the list should be passed as
suffix:dpi,suffix:dpi, ....
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
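As an illustrative example (these values are not the defaults), a project's conf.py
might set ``plot_include_source = True``, ``plot_formats = [('png', 100), ('hires.png', 200), 'pdf']``
and ``plot_rcparams = {'font.size': 9}``.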
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
import warnings
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
try:
with warnings.catch_warnings(record=True):
warnings.simplefilter("error", UserWarning)
matplotlib.use('Agg')
except UserWarning:
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
else:
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_plot_labels(app, document):
"""
To make plots referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in six.iteritems(document.nametypes):
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': _option_context,
'nofigs': directives.flag,
'encoding': directives.encoding
}
app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
app.add_config_value('plot_pre_code', None, True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_html_show_source_link', True, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_config_value('plot_rcparams', {}, True)
app.add_config_value('plot_apply_rcparams', False, True)
app.add_config_value('plot_working_directory', None, True)
app.add_config_value('plot_template', None, True)
app.connect(str('doctree-read'), mark_plot_labels)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
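# Behaviour sketch (the snippet below is illustrative): each returned piece
# ends at a bare plt.show() line, so
#     split_code_at_show("plt.plot(x)\nplt.show()\nplt.hist(y)\n")
# returns ["plt.plot(x)\nplt.show()", "plt.hist(y)\n"].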
def remove_coding(text):
"""
Remove the coding comment, which six.exec_ doesn't like.
"""
    sub_re = re.compile(r"^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.{{ default_fmt }}
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. figure:: {{ build_dir }}/{{ img.basename }}.pdf
{% for option in options -%}
{{ option }}
{% endfor %}
{{ caption }}
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
Import a Python module from a path, and run the function given by
name, if function_name is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
if six.PY2:
pwd = os.getcwdu()
else:
pwd = os.getcwd()
old_sys_path = list(sys.path)
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a valid '
'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.plot_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
# Redirect stdout
stdout = sys.stdout
if six.PY3:
sys.stdout = io.StringIO()
else:
sys.stdout = cStringIO.StringIO()
# Assign a do-nothing print function to the namespace. There
# doesn't seem to be any other way to provide a way to (not) print
# that works correctly across Python 2 and 3.
def _dummy_print(*arg, **kwarg):
pass
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
six.exec_(six.text_type("import numpy as np\n" +
"from matplotlib import pyplot as plt\n"), ns)
else:
six.exec_(six.text_type(setup.config.plot_pre_code), ns)
ns['print'] = _dummy_print
if "__main__" in code:
six.exec_("__name__ = '__main__'", ns)
code = remove_coding(code)
six.exec_(code, ns)
if function_name is not None:
six.exec_(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
def clear_state(plot_rcparams, close=True):
if close:
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def get_plot_formats(config):
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
if isinstance(plot_formats, six.string_types):
        # Sphinx < 1.3 passes plot_formats as a comma-separated string;
        # split it to mimic Sphinx >= 1.3, which always provides a list.
plot_formats = plot_formats.split(',')
for fmt in plot_formats:
if isinstance(fmt, six.string_types):
if ':' in fmt:
suffix, dpi = fmt.split(':')
formats.append((str(suffix), int(dpi)))
else:
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt) == 2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
return formats
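# Parsing sketch (the config values below are illustrative):
#     plot_formats = 'png:100,pdf:150'           -> [('png', 100), ('pdf', 150)]
#     plot_formats = ['png', ('hires.png', 250)] -> [('png', 80), ('hires.png', 250)]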
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, context_reset=False,
close_figs=False):
"""
Run a pyplot script and save the images in *output_dir*.
Save the images under *output_dir* with file names derived from
*output_base*
"""
formats = get_plot_formats(config)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# We didn't find the files, so build them
results = []
if context:
ns = plot_context
else:
ns = {}
if context_reset:
clear_state(config.plot_rcparams)
plot_context.clear()
close_figs = not context or close_figs
for i, code_piece in enumerate(code_pieces):
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close_figs)
elif close_figs:
plt.close('all')
run_code(code_piece, code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except Exception as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
return results
def run(arguments, content, options, state_machine, state, lineno):
document = state_machine.document
config = document.settings.env.config
nofigs = 'nofigs' in options
formats = get_plot_formats(config)
default_fmt = formats[0][0]
options.setdefault('include-source', config.plot_include_source)
keep_context = 'context' in options
context_opt = None if not keep_context else options['context']
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with io.open(source_file_name, 'r', encoding='utf-8') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
try:
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
except ValueError:
# on Windows, relpath raises ValueError when path and start are on
# different mounts/drives
build_dir_link = build_dir
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code,
source_file_name,
build_dir,
output_base,
keep_context,
function_name,
config,
context_reset=context_opt == 'reset',
close_figs=context_opt == 'close-figs')
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
# Not-None src_link signals the need for a source link in the generated
# html
if j == 0 and config.plot_html_show_source_link:
src_link = source_link
else:
src_link = None
result = format_template(
config.plot_template or TEMPLATE,
default_fmt=default_fmt,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats and len(images),
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
if not os.path.exists(dest_dir):
cbook.mkdirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with io.open(target_name, 'w', encoding="utf-8") as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
| bsd-2-clause |
tawsifkhan/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are
slightly shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
walterreade/scikit-learn | sklearn/tests/test_learning_curve.py | 59 | 10869 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
LiaoPan/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
caidongyun/pylearn2 | pylearn2/scripts/browse_conv_weights.py | 44 | 7605 | #! /usr/bin/env python
"""
Interactive viewer for the convolutional weights in a pickled model.
Unlike ./show_weights, this shows one unit's weights at a time. This
allows it to display weights from higher levels (which can have 100s
of input channels), not just the first.
"""
import os
import sys
import warnings
import argparse
import numpy
from pylearn2.models.mlp import MLP, ConvElemwise, CompositeLayer
from pylearn2.models.maxout import MaxoutConvC01B
from pylearn2.utils import safe_zip, serial
from pylearn2.space import Conv2DSpace
try:
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
pyplot = None
def _parse_args():
parser = argparse.ArgumentParser(
description=("Interactive browser of convolutional weights. "
"Up/down keys switch layers. "
"Left/right keys switch units."))
parser.add_argument('-i',
'--input',
required=True,
help=".pkl file of model")
result = parser.parse_args()
if os.path.splitext(result.input)[1] != '.pkl':
print("Expected --input to end in .pkl, got %s." % result.input)
sys.exit(1)
return result
def _get_conv_layers(layer, result=None):
'''
Returns a list of the convolutional layers in a model.
Returns
-------
rval: list
Lists the convolutional layers (ConvElemwise, MaxoutConvC01B).
'''
if result is None:
result = []
if isinstance(layer, (MLP, CompositeLayer)):
for sub_layer in layer.layers:
_get_conv_layers(sub_layer, result)
elif isinstance(layer, (MaxoutConvC01B, ConvElemwise)):
result.append(layer)
return result
def _get_conv_weights_bc01(layer):
'''
Returns a conv. layer's weights in BC01 format.
Parameters
----------
layer: MaxoutConvC01B or ConvElemwise
Returns
-------
rval: numpy.ndarray
The kernel weights in BC01 axis order. (B: output channels, C: input
channels)
'''
assert isinstance(layer, (MaxoutConvC01B, ConvElemwise))
weights = layer.get_params()[0].get_value()
if isinstance(layer, MaxoutConvC01B):
c01b = Conv2DSpace(shape=weights.shape[1:3],
num_channels=weights.shape[0],
axes=('c', 0, 1, 'b'))
bc01 = Conv2DSpace(shape=c01b.shape,
num_channels=c01b.num_channels,
axes=('b', 'c', 0, 1))
weights = c01b.np_format_as(weights, bc01)
elif isinstance(layer, ConvElemwise):
weights = weights[:, :, ::-1, ::-1] # reverse 0, 1 axes
return weights
def _num_conv_units(conv_layer):
'''
Returns a conv layer's number of output channels.
'''
assert isinstance(conv_layer, (MaxoutConvC01B, ConvElemwise))
weights = conv_layer.get_params()[0].get_value()
if isinstance(conv_layer, MaxoutConvC01B):
return weights.shape[-1]
elif isinstance(conv_layer, ConvElemwise):
return weights.shape[0]
def main():
"Entry point of script."
args = _parse_args()
model = serial.load(args.input)
if not isinstance(model, MLP):
print("Expected the .pkl file to contain an MLP, got a %s." %
              str(type(model)))
sys.exit(1)
def get_figure_and_axes(conv_layers, window_width=800):
kernel_display_width = 20
margin = 5
grid_square_width = kernel_display_width + margin
num_columns = window_width // grid_square_width
max_num_channels = numpy.max([layer.get_input_space().num_channels
for layer in conv_layers])
num_rows = max_num_channels // num_columns
if num_rows * num_columns < max_num_channels:
num_rows += 1
assert num_rows * num_columns >= max_num_channels
window_width = 15
        # '* 1.8' comes from the fact that rows take up about 1.8 times as much
# space as columns, due to the title text.
window_height = window_width * ((num_rows * 1.8) / num_columns)
figure, all_axes = pyplot.subplots(num_rows,
num_columns,
squeeze=False,
figsize=(window_width,
window_height))
for unit_index, axes in enumerate(all_axes.flat):
subplot_title = axes.set_title('%d' % unit_index)
subplot_title.set_size(8)
subplot_title.set_color((.3, .3, .3))
# Hides tickmarks
for axes_row in all_axes:
for axes in axes_row:
axes.get_xaxis().set_visible(False)
axes.get_yaxis().set_visible(False)
return figure, all_axes
conv_layers = _get_conv_layers(model)
figure, all_axes = get_figure_and_axes(conv_layers)
title_text = figure.suptitle("title")
pyplot.tight_layout(h_pad=.1, w_pad=.5) # in inches
layer_index = numpy.array(0)
unit_indices = numpy.zeros(len(model.layers), dtype=int)
def redraw():
'''
Draws the currently selected convolutional kernel.
'''
axes_list = all_axes.flatten()
layer = conv_layers[layer_index]
unit_index = unit_indices[layer_index, ...]
weights = _get_conv_weights_bc01(layer)[unit_index, ...]
active_axes = axes_list[:weights.shape[0]]
for axes, weights in safe_zip(active_axes, weights):
axes.set_visible(True)
axes.imshow(weights, cmap='gray', interpolation='nearest')
assert len(frozenset(active_axes)) == len(active_axes)
unused_axes = axes_list[len(active_axes):]
assert len(frozenset(unused_axes)) == len(unused_axes)
assert len(axes_list) == len(active_axes) + len(unused_axes)
for axes in unused_axes:
axes.set_visible(False)
title_text.set_text("Layer %s, unit %d" %
(layer.layer_name,
unit_indices[layer_index]))
figure.canvas.draw()
def on_key_press(event):
"Callback for key press events"
def increment(index, size, step):
"""
Increments an index in-place.
Parameters
----------
index: numpy.ndarray
scalar (0-dim array) of dtype=int. Non-negative.
size: int
One more than the maximum permissible index.
step: int
-1, 0, or 1.
"""
assert index >= 0
assert step in (0, -1, 1)
index[...] = (index + size + step) % size
if event.key in ('up', 'down'):
increment(layer_index,
len(conv_layers),
1 if event.key == 'up' else -1)
unit_index = unit_indices[layer_index]
redraw()
elif event.key in ('right', 'left'):
unit_index = unit_indices[layer_index:layer_index + 1]
increment(unit_index,
_num_conv_units(conv_layers[layer_index]),
1 if event.key == 'right' else -1)
redraw()
elif event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
redraw()
pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
cmdunkers/DeeperMind | PythonEnv/lib/python2.7/site-packages/scipy/interpolate/fitpack.py | 16 | 46294 | #!/usr/bin/env python
"""
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.
See
http://www.cs.kuleuven.ac.be/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/index.html
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
TODO: Make interfaces to the following fitpack functions:
For univariate splines: cocosp, concon, fourco, insert
For bivariate splines: profil, regrid, parsur, surev
"""
from __future__ import division, print_function, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from . import _fitpack
from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
empty, iinfo, intc, asarray)
# Try to replace _fitpack interface with
# f2py-generated version
from . import dfitpack
def _intc_overflow(x, msg=None):
"""Cast the value to an intc and raise an OverflowError if the value
cannot fit.
"""
if x > iinfo(intc).max:
if msg is None:
msg = '%r cannot fit into an intc' % x
raise OverflowError(msg)
return intc(x)
_iermess = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree k.\n"
"fp gives the upper bound fp0 for the smoothing factor s", None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: data (x,y) size is too small or smoothing parameter"
"\ns is too small (fp>s).", ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
10: ["Error on input data", ValueError],
'unknown': ["An error occurred", TypeError]
}
_iermess2 = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree kx and ky."
"\nfp gives the upper bound fp0 for the smoothing factor s", None],
-3: ["Warning. The coefficients of the spline have been computed as the\n"
"minimal norm least-squares solution of a rank deficient system.",
None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: nxest or nyest too small or s is too small. (fp>s)",
ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable causes: s too small or badly chosen eps.\n"
"(abs(fp-s)/s>0.001)", ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
4: ["No more knots can be added because the number of B-spline\n"
"coefficients already exceeds the number of data points m.\n"
"Probable causes: either s or m too small. (fp>s)", ValueError],
5: ["No more knots can be added because the additional knot would\n"
"coincide with an old one. Probable cause: s too small or too large\n"
"a weight to an inaccurate data point. (fp>s)", ValueError],
10: ["Error on input data", ValueError],
11: ["rwrk2 too small, i.e. there is not enough workspace for computing\n"
"the minimal least-squares solution of a rank deficient system of\n"
"linear equations.", ValueError],
'unknown': ["An error occurred", TypeError]
}
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-dimensional curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
        A value of nest=m+k+1 is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
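    Examples
    --------
    A minimal usage sketch; the sample data (a circle traced by 40 points)
    are illustrative:
    >>> import numpy as np
    >>> from scipy.interpolate import splprep, splev
    >>> theta = np.linspace(0, 2*np.pi, 40)
    >>> pts = [np.cos(theta), np.sin(theta)]
    >>> tck, u = splprep(pts, s=0)
    >>> xi, yi = splev(np.linspace(0, 1, 200), tck)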
"""
    global _parcur_cache  # module-level cache; needed so task=1 reuses prior results
    if task <= 0:
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
x = atleast_1d(x)
idim, m = x.shape
if per:
for i in range(idim):
if x[i][0] != x[i][-1]:
if quiet < 2:
warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
(i, m, i)))
x[i][-1] = x[i][0]
if not 0 < idim < 11:
raise TypeError('0 < idim < 11 must hold')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
ipar = (u is not None)
if ipar:
_parcur_cache['u'] = u
if ub is None:
_parcur_cache['ub'] = u[0]
else:
_parcur_cache['ub'] = ub
if ue is None:
_parcur_cache['ue'] = u[-1]
else:
_parcur_cache['ue'] = ue
else:
_parcur_cache['u'] = zeros(m, float)
if not (1 <= k <= 5):
raise TypeError('1 <= k= %d <=5 must hold' % k)
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
raise TypeError('Mismatch of input dimensions')
if s is None:
s = m - sqrt(2*m)
if t is None and task == -1:
raise TypeError('Knots must be given for task=-1')
if t is not None:
_parcur_cache['t'] = atleast_1d(t)
n = len(_parcur_cache['t'])
if task == -1 and n < 2*k + 2:
raise TypeError('There must be at least 2*k+2 knots for task=-1')
if m <= k:
raise TypeError('m > k must hold')
if nest is None:
nest = m + 2*k
if (task >= 0 and s == 0) or (nest < 0):
if per:
nest = m + 2*k
else:
nest = m + k + 1
nest = max(nest, 2*k + 3)
u = _parcur_cache['u']
ub = _parcur_cache['ub']
ue = _parcur_cache['ue']
t = _parcur_cache['t']
wrk = _parcur_cache['wrk']
iwrk = _parcur_cache['iwrk']
t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
task, ipar, s, t, nest, wrk, iwrk, per)
_parcur_cache['u'] = o['u']
_parcur_cache['ub'] = o['ub']
_parcur_cache['ue'] = o['ue']
_parcur_cache['t'] = t
_parcur_cache['wrk'] = o['wrk']
_parcur_cache['iwrk'] = o['iwrk']
ier = o['ier']
fp = o['fp']
n = len(t)
u = o['u']
c.shape = idim, n - k - 1
tcku = [t, list(c), k], u
if ier <= 0 and not quiet:
warnings.warn(RuntimeWarning(_iermess[ier][0] +
"\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s)))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tcku, fp, ier, _iermess[ier][0]
except KeyError:
return tcku, fp, ier, _iermess['unknown'][0]
else:
return tcku
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc)}
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The order of the spline fit. It is recommended to use cubic splines.
Even order splines should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored and used internally).
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
    See Also
    --------
    UnivariateSpline, BivariateSpline
    splprep, splev, sproot, spalde, splint
    bisplrep, bisplev
    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives. Uses the
    FORTRAN routine curfit from FITPACK.
    The user is responsible for assuring that the values of *x* are unique.
    Otherwise, *splrep* will not return sensible results.
    If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, tck)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
    global _curfit_cache  # module-level cache; needed so task=1 reuses prior results
    if task <= 0:
_curfit_cache = {}
x, y = map(atleast_1d, [x, y])
m = len(x)
if w is None:
w = ones(m, float)
if s is None:
s = 0.0
else:
w = atleast_1d(w)
if s is None:
s = m - sqrt(2*m)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if (m != len(y)) or (m != len(w)):
raise TypeError('Lengths of the first three arguments (x,y,w) must '
'be equal')
if not (1 <= k <= 5):
raise TypeError('Given degree of the spline (k=%d) is not supported. '
'(1<=k<=5)' % k)
if m <= k:
raise TypeError('m > k must hold')
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if t is not None:
task = -1
if task == -1:
if t is None:
raise TypeError('Knots must be given for task=-1')
numknots = len(t)
_curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
_curfit_cache['t'][k+1:-k-1] = t
nest = len(_curfit_cache['t'])
elif task == 0:
if per:
nest = max(m + 2*k, 2*k + 3)
else:
nest = max(m + k + 1, 2*k + 3)
t = empty((nest,), float)
_curfit_cache['t'] = t
if task <= 0:
if per:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
else:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
_curfit_cache['iwrk'] = empty((nest,), intc)
try:
t = _curfit_cache['t']
wrk = _curfit_cache['wrk']
iwrk = _curfit_cache['iwrk']
except KeyError:
raise TypeError("must call with task=1 only after"
" call with task=0,-1")
if not per:
n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
xb, xe, k, s)
else:
n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
tck = (t[:n], c[:n], k)
if ier <= 0 and not quiet:
_mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
            warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess[ier][0]
except KeyError:
return tck, fp, ier, _iermess['unknown'][0]
else:
return tck
def _ntlist(l): # return non-trivial list
return l
# if len(l)>1: return l
# return l[0]
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : tuple
A sequence of length 3 returned by `splrep` or `splprep` containing
the knots, coefficients, and degree of the spline.
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in ``x``. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in N-dimensional space.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
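Examples
--------
A minimal usage sketch (illustrative; output values not shown). It reuses
the sample data from the `splrep` example:
>>> import numpy as np
>>> from scipy.interpolate import splrep, splev
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> ynew = splev(np.linspace(0, 10, 50), tck)  # spline values
>>> dynew = splev(np.linspace(0, 10, 50), tck, der=1)  # first derivative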
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k, der=der:
splev(x, [t, c, k], der, ext), c))
else:
if not (0 <= der <= k):
raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
if ext not in (0, 1, 2, 3):
raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
x = asarray(x)
shape = x.shape
x = atleast_1d(x).ravel()
y, ier = _fitpack._spl_(x, der, t, c, k, ext)
if ier == 10:
raise ValueError("Invalid input data")
if ier == 1:
raise ValueError("Found x value not in the domain")
if ier:
raise TypeError("An error occurred")
return y.reshape(shape)
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline.
Given the knots and coefficients of a B-spline, evaluate the definite
integral of the smoothing polynomial between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
Notes
-----
splint silently assumes that the spline function is zero outside the data
interval (a, b).
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
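Examples
--------
A short sketch (illustrative). The spline interpolates ``y = x**2``, so
the integral over [0, 3] should be close to 9:
>>> import numpy as np
>>> from scipy.interpolate import splrep, splint
>>> x = np.linspace(0, 3, 11)
>>> tck = splrep(x, x**2)
>>> result = splint(0, 3, tck)  # approximately 9.0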
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return _ntlist(list(map(lambda c, a=a, b=b, t=t, k=k:
splint(a, b, [t, c, k]), c)))
else:
aint, wrk = _fitpack._splint(t, c, k, a, b)
if full_output:
return aint, wrk
else:
return aint
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
The number of knots must be >= 8, and the degree must be 3.
The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
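Examples
--------
An illustrative sketch (exact output not shown); the roots of a cubic
spline fitted to ``sin(x)`` lie close to multiples of pi:
>>> import numpy as np
>>> from scipy.interpolate import splrep, sproot
>>> x = np.linspace(0, 10, 70)
>>> tck = splrep(x, np.sin(x))
>>> roots = sproot(tck)  # roots near multiples of pi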
"""
t, c, k = tck
if k != 3:
raise ValueError("sproot works only for cubic (k=3) splines")
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return _ntlist(list(map(lambda c, t=t, k=k, mest=mest:
sproot([t, c, k], mest), c)))
else:
if len(t) < 8:
raise TypeError("The number of knots %d>=8" % len(t))
z, ier = _fitpack._sproot(t, c, k, mest)
if ier == 10:
raise TypeError("Invalid input data. "
"t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
if ier == 0:
return z
if ier == 1:
warnings.warn(RuntimeWarning("The number of zeros exceeds mest"))
return z
raise TypeError("Unknown error")
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
UnivariateSpline, BivariateSpline
References
----------
.. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
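Examples
--------
A brief sketch (illustrative). For a cubic spline the result at a single
point has k+1 = 4 entries, the value followed by the derivatives:
>>> import numpy as np
>>> from scipy.interpolate import splrep, spalde
>>> x = np.linspace(0, 10, 30)
>>> tck = splrep(x, np.sin(x))
>>> derivs = spalde(5.0, tck)  # [f(5), f'(5), f''(5), f'''(5)]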
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return _ntlist(list(map(lambda c, x=x, t=t, k=k:
spalde(x, [t, c, k]), c)))
else:
x = atleast_1d(x)
if len(x) > 1:
return list(map(lambda x, tck=tck: spalde(x, tck), x))
d, ier = _fitpack._spalde(t, c, k, x[0])
if ier == 0:
return d
if ier == 10:
raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
raise TypeError("Unknown error")
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
# full_output=0,nest=None,per=0,quiet=1):
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
'wrk': array([], float), 'iwrk': array([], intc)}
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
full_output=0, nxest=None, nyest=None, quiet=1):
"""
Find a bivariate B-spline representation of a surface.
Given a set of data points (x[i], y[i], z[i]) representing a surface
z=f(x,y), compute a B-spline representation of the surface. Based on
the routine SURFIT from FITPACK.
Parameters
----------
x, y, z : ndarray
Rank-1 arrays of data points.
w : ndarray, optional
Rank-1 array of weights. By default ``w=np.ones(len(x))``.
xb, xe : float, optional
End points of approximation interval in `x`.
By default ``xb = x.min(), xe=x.max()``.
yb, ye : float, optional
End points of approximation interval in `y`.
By default ``yb=y.min(), ye = y.max()``.
kx, ky : int, optional
The degrees of the spline (1 <= kx, ky <= 5).
Third order (kx=ky=3) is recommended.
task : int, optional
If task=0, find knots in x and y and coefficients for a given
smoothing factor, s.
If task=1, find knots and coefficients for another value of the
smoothing factor, s. bisplrep must have been previously called
with task=0 or task=1.
If task=-1, find coefficients for a given set of knots tx, ty.
s : float, optional
A non-negative smoothing factor. If weights correspond
to the inverse of the standard-deviation of the errors in z,
then a good s-value should be found in the range
``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
eps : float, optional
A threshold for determining the effective rank of an
over-determined linear system of equations (0 < eps < 1).
`eps` is not likely to need changing.
tx, ty : ndarray, optional
Rank-1 arrays of the knots of the spline for task=-1
full_output : int, optional
Non-zero to return optional outputs.
nxest, nyest : int, optional
Over-estimates of the total number of knots. If None then
``nxest = max(kx+sqrt(m/2),2*kx+3)``,
``nyest = max(ky+sqrt(m/2),2*ky+3)``.
quiet : int, optional
Non-zero to suppress printing of messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : array_like
A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
coefficients (c) of the bivariate B-spline representation of the
surface along with the degree of the spline.
fp : ndarray
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated if
ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplev` to evaluate the value of the B-spline given its tck
representation.
References
----------
.. [1] Dierckx P.:An algorithm for surface fitting with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P.:An algorithm for surface fitting with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
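Examples
--------
A minimal sketch (illustrative; default smoothing, output not shown),
fitting gridded samples of the surface ``z = x*y``:
>>> import numpy as np
>>> from scipy.interpolate import bisplrep
>>> x, y = np.mgrid[-1:1:20j, -1:1:20j]
>>> z = x*y
>>> tck = bisplrep(x.ravel(), y.ravel(), z.ravel())
>>> len(tck)  # [tx, ty, c, kx, ky]
5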
"""
x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
m = len(x)
if not (m == len(y) == len(z)):
raise TypeError('len(x)==len(y)==len(z) must hold.')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if xb is None:
xb = x.min()
if xe is None:
xe = x.max()
if yb is None:
yb = y.min()
if ye is None:
ye = y.max()
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if s is None:
s = m - sqrt(2*m)
if tx is None and task == -1:
raise TypeError('Knots_x must be given for task=-1')
if tx is not None:
_surfit_cache['tx'] = atleast_1d(tx)
nx = len(_surfit_cache['tx'])
if ty is None and task == -1:
raise TypeError('Knots_y must be given for task=-1')
if ty is not None:
_surfit_cache['ty'] = atleast_1d(ty)
ny = len(_surfit_cache['ty'])
if task == -1 and nx < 2*kx+2:
raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
if task == -1 and ny < 2*ky+2:
raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
'supported. (1<=k<=5)' % (kx, ky))
if m < (kx + 1)*(ky + 1):
raise TypeError('m >= (kx+1)(ky+1) must hold')
if nxest is None:
nxest = int(kx + sqrt(m/2))
if nyest is None:
nyest = int(ky + sqrt(m/2))
nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
if task >= 0 and s == 0:
nxest = int(kx + sqrt(3*m))
nyest = int(ky + sqrt(3*m))
if task == -1:
_surfit_cache['tx'] = atleast_1d(tx)
_surfit_cache['ty'] = atleast_1d(ty)
tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
wrk = _surfit_cache['wrk']
u = nxest - kx - 1
v = nyest - ky - 1
km = max(kx, ky) + 1
ne = max(nxest, nyest)
bx, by = kx*v + ky + 1, ky*u + kx + 1
b1, b2 = bx, bx + v - ky
if bx > by:
b1, b2 = by, by + u - kx
msg = "Too many data points to interpolate"
lwrk1 = _intc_overflow(u*v*(2 + b1 + b2) +
2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
msg=msg)
lwrk2 = _intc_overflow(u*v*(b2 + 1) + b2, msg=msg)
tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
task, s, eps, tx, ty, nxest, nyest,
wrk, lwrk1, lwrk2)
_curfit_cache['tx'] = tx
_curfit_cache['ty'] = ty
_curfit_cache['wrk'] = o['wrk']
ier, fp = o['ier'], o['fp']
tck = [tx, ty, c, kx, ky]
ierm = min(11, max(-3, ier))
if ierm <= 0 and not quiet:
_mess = (_iermess2[ierm][0] +
"\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ierm > 0 and not full_output:
if ier in [1, 2, 3, 4, 5]:
_mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
else:
try:
raise _iermess2[ierm][1](_iermess2[ierm][0])
except KeyError:
raise _iermess2['unknown'][1](_iermess2['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess2[ierm][0]
except KeyError:
return tck, fp, ier, _iermess2['unknown'][0]
else:
return tck
def bisplev(x, y, tck, dx=0, dy=0):
"""
Evaluate a bivariate B-spline and its derivatives.
Return a rank-2 array of spline function values (or spline derivative
values) at points given by the cross-product of the rank-1 arrays `x` and
`y`. In special cases, return an array or just a float if either `x` or
`y` or both are floats. Based on BISPEV from FITPACK.
Parameters
----------
x, y : ndarray
Rank-1 arrays specifying the domain over which to evaluate the
spline or its derivative.
tck : tuple
A sequence of length 5 returned by `bisplrep` containing the knot
locations, the coefficients, and the degree of the spline:
[tx, ty, c, kx, ky].
dx, dy : int, optional
The orders of the partial derivatives in `x` and `y` respectively.
Returns
-------
vals : ndarray
The B-spline or its derivative evaluated over the set formed by
the cross-product of `x` and `y`.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplrep` to generate the `tck` representation.
References
----------
.. [1] Dierckx P. : An algorithm for surface fitting
with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P. : An algorithm for surface fitting
with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P. : Curve and surface fitting with splines,
Monographs on Numerical Analysis, Oxford University Press, 1993.
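Examples
--------
A brief sketch (illustrative; assumes ``tck`` from the `bisplrep` example):
>>> import numpy as np
>>> from scipy.interpolate import bisplev
>>> xnew = np.linspace(-1, 1, 50)
>>> ynew = np.linspace(-1, 1, 60)
>>> znew = bisplev(xnew, ynew, tck)
>>> znew.shape
(50, 60)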
"""
tx, ty, c, kx, ky = tck
if not (0 <= dx < kx):
raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
if not (0 <= dy < ky):
raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
x, y = map(atleast_1d, [x, y])
if (len(x.shape) != 1) or (len(y.shape) != 1):
raise ValueError("First two entries should be rank-1 arrays.")
z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
z.shape = len(x), len(y)
if len(z) > 1:
return z
if len(z[0]) > 1:
return z[0]
return z[0][0]
def dblint(xa, xb, ya, yb, tck):
"""Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
tck : list [tx, ty, c, kx, ky]
A sequence of length 5 returned by bisplrep containing the knot
locations tx, ty, the coefficients c, and the degrees kx, ky
of the spline.
Returns
-------
integ : float
The value of the resulting integral.
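Examples
--------
Illustrative sketch (assumes ``tck`` from the `bisplrep` example, where
``z = x*y``, so the integral over [0, 1] x [0, 1] is about 0.25):
>>> val = dblint(0, 1, 0, 1, tck)  # approximately 0.25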
"""
tx, ty, c, kx, ky = tck
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : tuple
A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
the vector of knots, the B-spline coefficients,
and the degree of the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the new spline.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
Notes
-----
Based on algorithms from [1]_ and [2]_.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
cc = []
for c_vals in c:
tt, cc_val, kk = insert(x, [t, c_vals, k], m)
cc.append(cc_val)
return (tt, cc, kk)
else:
tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
return (tt, cc, k)
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
tck_der : tuple of (t2, c2, k2)
Spline of order k2=k-n representing the derivative
of the input spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\pi/2 + n\pi` of
:math:`\cos(x) = \sin'(x)`.
"""
if n < 0:
return splantider(tck, -n)
t, c, k = tck
if n > k:
raise ValueError(("Order of derivative (n = %r) must be <= "
"order of spline (k = %r)") % (n, tck[2]))
with np.errstate(invalid='raise', divide='raise'):
try:
for j in range(n):
# See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
# Compute the denominator in the differentiation formula.
dt = t[k+1:-1] - t[1:-k-1]
# Compute the new coefficients
c = (c[1:-1-k] - c[:-2-k]) * k / dt
# Pad coefficient array to same size as knots (FITPACK
# convention)
c = np.r_[c, [0]*k]
# Adjust knots
t = t[1:-1]
k -= 1
except FloatingPointError:
raise ValueError(("The spline has internal repeated knots "
"and is not differentiable %d times") % n)
return t, c, k
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
tck_ader : tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
See Also
--------
splder, splev, spalde
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if n < 0:
return splder(tck, -n)
t, c, k = tck
for j in range(n):
# This is the inverse set of operations to splder.
# Compute the multiplier in the antiderivative formula.
dt = t[k+1:] - t[:-k-1]
# Compute the new coefficients
c = np.cumsum(c[:-k-1] * dt) / (k + 1)
c = np.r_[0, c, [c[-1]]*(k+2)]
# New knots
t = np.r_[t[0], t, t[-1]]
k += 1
return t, c, k
| bsd-3-clause |
Clyde-fare/scikit-learn | sklearn/utils/testing.py | 71 | 26178 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
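Examples
--------
Illustrative sketch:
>>> import warnings
>>> assert_warns(UserWarning, warnings.warn, "deprecated", UserWarning)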
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
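Examples
--------
Illustrative sketch:
>>> import warnings
>>> assert_warns_message(UserWarning, "deprecated", warnings.warn, "this is deprecated", UserWarning)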
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
The expected exception class (or classes) to be raised
message : str
A substring that must be contained in the raised error message
function : callable
Callable object expected to raise one of `exceptions`
*args : the positional arguments to `function`.
**kwargs : the keyword arguments to `function`
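Examples
--------
Illustrative sketch:
>>> assert_raise_message(ValueError, "invalid literal", int, "not a number")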
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', so take that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
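Examples
--------
Illustrative sketch (the exact list depends on the installed version):
>>> classifiers = all_estimators(type_filter='classifier')
>>> len(classifiers) > 0
True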
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under Python < 3.4 and POSIX (e.g. Linux or OSX), using multiprocessing in
conjunction with some implementation of BLAS (or other libraries that
manage an internal posix thread pool) can cause a crash or a freeze of the
Python process.
Under Python 3.4 and later, joblib uses the forkserver mode of
multiprocessing which does not trigger this problem.
In practice all known packaged distributions (from Linux distros or
Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
only impact OSX users.
This wrapper makes it possible to skip tests that can possibly cause
this crash under OSX.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin' and sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copied from joblib.pool (for independence)."""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
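# Example usage of TempMemmap (illustrative sketch): expose a read-only,
# memory-mapped copy of an array to a test, e.g.
# with TempMemmap(np.ones((10, 3))) as X:
#     X.flags.writeable  # -> False (read-only memmap by default)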
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
PedroTrujilloV/nest-simulator | testsuite/manualtests/test_tsodyks_depr_fac.py | 13 | 1136 | # -*- coding: utf-8 -*-
#
# test_tsodyks_depr_fac.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy import *
from matplotlib.pylab import *
from matplotlib.mlab import *
def plot_spikes():
dt = 0.1 # time resolution
nbins = 1000
N = 500 # number of neurons
vm = load('voltmeter-0-0-4.dat')
figure(1)
clf()
plot(vm[:,0], vm[:,1], 'r')
xlabel('time / ms')
ylabel('$V_m [mV]$')
savefig('test_tsodyks_depressing.png')
plot_spikes()
show()
| gpl-2.0 |
IssamLaradji/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 10 | 9541 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
"""Test stopping conditions of gradient descent."""
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
"""Test if the binary search finds Gaussians with desired perplexity."""
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
"""Test gradient of Kullback-Leibler divergence."""
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
"""Test trustworthiness score."""
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
"""Nearest neighbors should be preserved approximately."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
"""X can be a sparse matrix."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
"""Nearest neighbors should be preserved approximately."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
"""Early exaggeration factor must be >= 1."""
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
"""Number of gradient descent iterations must be at least 200."""
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
"""Precomputed distance matrices must be square matrices."""
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
"""'init' must be 'pca' or 'random'."""
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
"""'metric' must be valid."""
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
"""Precomputed distance matrices must be square matrices."""
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
"""t-SNE should allow metrics that cannot be squared (issue #3526)."""
random_state = check_random_state(0)
tsne = TSNE(verbose=2, metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
| bsd-3-clause |
detrout/debian-statsmodels | statsmodels/sandbox/tsa/examples/ex_mle_arma.py | 33 | 4587 | # -*- coding: utf-8 -*-
"""
TODO: broken because of changes to arguments and import paths
fixing this needs a closer look
Created on Thu Feb 11 23:41:53 2010
Author: josef-pktd
copyright: Simplified BSD see license.txt
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import numdifftools as ndt
import statsmodels.api as sm
from statsmodels.sandbox import tsa
from statsmodels.tsa.arma_mle import Arma # local import
from statsmodels.tsa.arima_process import arma_generate_sample
examples = ['arma']
if 'arma' in examples:
print("\nExample 1")
print('----------')
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arma_generate_sample(ar,ma,1000,0.1)
y1 -= y1.mean() #no mean correction/constant in estimation so far
arma1 = Arma(y1)
arma1.nar = 1
arma1.nma = 1
arma1res = arma1.fit_mle(order=(1,1), method='fmin')
print(arma1res.params)
#Warning need new instance otherwise results carry over
arma2 = Arma(y1)
arma2.nar = 1
arma2.nma = 1
res2 = arma2.fit(method='bfgs')
print(res2.params)
print(res2.model.hessian(res2.params))
print(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))
arest = tsa.arima.ARIMA(y1)
resls = arest.fit((1,0,1))
print(resls[0])
print(resls[1])
print('\nparameter estimate - comparing methods')
print('---------------------------------------')
print('parameter of DGP ar(1), ma(1), sigma_error')
print([-0.8, 0.5, 0.1])
print('mle with fmin')
print(arma1res.params)
print('mle with bfgs')
print(res2.params)
print('cond. least squares uses optim.leastsq ?')
errls = arest.error_estimate
print(resls[0], np.sqrt(np.dot(errls,errls)/errls.shape[0]))
err = arma1.geterrors(res2.params)
print('cond least squares parameter cov')
#print(np.dot(err,err)/err.shape[0] * resls[1])
#errls = arest.error_estimate
print(np.dot(errls,errls)/errls.shape[0] * resls[1])
# print('fmin hessian')
# print(arma1res.model.optimresults['Hopt'][:2,:2])
print('bfgs hessian')
print(res2.model.optimresults['Hopt'][:2,:2])
print('numdifftools inverse hessian')
print(-np.linalg.inv(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))[:2,:2])
print('\nFitting Arma(1,1) to squared data')
arma3 = Arma(y1**2)
res3 = arma3.fit(method='bfgs')
print(res3.params)
print('\nFitting Arma(3,3) to data from DGP Arma(1,1)')
arma4 = Arma(y1)
arma4.nar = 3
arma4.nma = 3
#res4 = arma4.fit(method='bfgs')
res4 = arma4.fit(start_params=[-0.5, -0.1,-0.1,0.2,0.1,0.1,0.5])
print(res4.params)
print('numdifftools inverse hessian')
pcov = -np.linalg.inv(ndt.Hessian(arma4.loglike, stepMax=1e-2)(res4.params))
#print(pcov)
print('standard error of parameter estimate from Hessian')
pstd = np.sqrt(np.diag(pcov))
print(pstd)
print('t-values')
print(res4.params/pstd)
print('eigenvalues of pcov:')
print(np.linalg.eigh(pcov)[0])
print('sometimes they are negative')
print("\nExample 2 - DGP is Arma(3,3)")
print('-----------------------------')
ar = [1.0, -0.6, -0.2, -0.1]
ma = [1.0, 0.5, 0.1, 0.1]
y2 = arma_generate_sample(ar, ma, 1000, 0.1)
y2 -= y2.mean() #no mean correction/constant in estimation so far
print('\nFitting Arma(3,3) to data from DGP Arma(3,3)')
arma4 = Arma(y2)
arma4.nar = 3
arma4.nma = 3
#res4 = arma4.fit(method='bfgs')
print('\ntrue parameters')
print('ar', ar[1:])
print('ma', ma[1:])
res4 = arma4.fit(start_params=[-0.5, -0.1,-0.1,0.2,0.1,0.1,0.5])
print(res4.params)
print('numdifftools inverse hessian')
pcov = -np.linalg.inv(ndt.Hessian(arma4.loglike, stepMax=1e-2)(res4.params))
#print(pcov)
print('standard error of parameter estimate from Hessian')
pstd = np.sqrt(np.diag(pcov))
print(pstd)
print('t-values')
print(res4.params/pstd)
print('eigenvalues of pcov:')
print(np.linalg.eigh(pcov)[0])
print('sometimes they are negative')
arma6 = Arma(y2)
arma6.nar = 3
arma6.nma = 3
res6 = arma6.fit(start_params=[-0.5, -0.1,-0.1,0.2,0.1,0.1,0.5],
method='bfgs')
print('\nmle with bfgs')
print(res6.params)
print('pstd with bfgs hessian')
hopt = res6.model.optimresults['Hopt']
print(np.sqrt(np.diag(hopt)))
#fmin estimates for coefficients in ARMA(3,3) look good
#but not inverse Hessian, sometimes negative values for variance
| bsd-3-clause |
ezg/PanoramicDataWin8 | backend/scripts/class_test.py | 1 | 5795 | from dataprovider import SequentialDataProvider
from databinner import DataBinner
import json
import numpy as np
import time
from sklearn.metrics import roc_curve
from sklearn.metrics import confusion_matrix
from sklearn.utils import compute_class_weight
from sklearn.metrics import auc
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import HashingVectorizer
def getClassifier(classifier):
print classifier
if classifier == 'sgd':
return SGDClassifier()
if classifier == 'naive_bayes':
return MultinomialNB(alpha=0.01)
elif classifier == 'perceptron':
return Perceptron()
elif classifier == 'passive_aggressive':
return PassiveAggressiveClassifier()
raise NotImplementedError()
def classifyStats( cm, y_test, y_prob, tile_size):
#print cm
#print classification_report(y_test, y_pred)
tp = float(cm[1][1])
fp = float(cm[0][1])
tn = float(cm[0][0])
fn = float(cm[1][0])
#precision = tp / (tp + fp)
#recall = tp / (tp + fn)
#f1 = 2 * tp / (2 * tp + fp + fn)
p_support = (tp + fn) / tile_size
n_support = (tn + fp) / tile_size
precision = tp / max((tp + fp), 1) * p_support + tn / max((tn + fn), 1) * n_support
recall = tp / max((tp + fn), 1) * p_support + tn / max((tn + fp), 1) * n_support
f1 = 2 * precision * recall / (precision + recall)
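    # Note: the precision/recall computed above are support-weighted averages of the
    # per-class values (comparable to sklearn's precision_score/recall_score with
    # average='weighted'), unlike the positive-class-only formulas commented out above.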
fpr, tpr, thresholds = roc_curve(y_test, y_prob[:, 1])
roc_auc = auc(fpr, tpr)
stats = {'tp': tp,
'fp': fp,
'tn': tn,
'fn': fn,
'precision': precision,
'recall': recall,
'f1': f1,
'fpr': fpr.tolist(),
'tpr': tpr.tolist(),
'auc': roc_auc}
return stats
job = {
"type": "execute",
"dataset": "cars.csv_100000",
"task": {
"type": "classify",
"classifier": "passive_aggressive",
"chunkSize": 10000,
"features": [
"car_name", "model_year"
],
"label": "mpg < 30",
"filter": ""
}
}
print json.dumps(job)
task = job['task']
dp = SequentialDataProvider(job['dataset'], 'C:\\data\\', task['chunkSize'], 0)
cls = getClassifier(task['classifier'])
def default(o):
if isinstance(o, np.integer): return int(o)
raise TypeError
cls_stats = {}
cls_name = task['classifier']
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
def vectorize(x, dt, feats, n):
ret = []
for i, feat in enumerate(feats):
if dt[i] == 'object':
v = [0] * n
v[hash(x[feat]) % n] = 1
ret.extend(v)
else:
ret.append(x[feat])
return ret
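# vectorize() applies a simple hashing trick to string-valued (dtype 'object') features:
# each category is mapped into one of n one-hot buckets via hash(value) % n, while numeric
# features are passed through unchanged.  For example, with n = 4 a value hashing to
# bucket 2 contributes [0, 0, 1, 0] to the feature vector (illustrative values only).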
X_test = []
y_test = None
while True:
c, df = dp.getDataFrame()
tick = time.time()
if not task['filter'] == '':
df = df.query(task['filter'])
print 'progress', str(c * 100.0) + '%'
if not df is None:
# retain first as test
if len(X_test) == 0:
dt = df[task['features']].dtypes
feats = task['features']
df['test'] = df.apply(lambda x: vectorize(x, dt, feats, 25), axis=1)
X_test = np.array([list(x) for x in df['test']])
#X_test = df[task['features']]
y_test = np.array([1 if x else 0 for x in np.array(df.eval(task['label']))])
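            # The label is the boolean expression in task['label'] (here "mpg < 30"),
            # evaluated per row with DataFrame.eval and cast to 0/1, so this is a
            # binary classification problem.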
else:
s = time.time()
df['test'] = df.apply(lambda x: vectorize(x, dt, feats, 25), axis=1)
X_train = np.array([list(x) for x in df['test']])
#X_train = df[task['features']]
print "time", (time.time() - s), X_train.shape
y_train = np.array([1 if x else 0 for x in np.array(df.eval(task['label']))])
#print len(X_train), len(y_train)
cls.partial_fit(X_train, y_train, classes=np.array([0, 1]))
y_prob = None
y_pred = None
if cls_name in ['sgd', 'perceptron', 'passive_aggressive']:
y_pred = cls.predict(X_test)
y_prob = np.array([[0,y] for y in y_pred])
else:
y_prob = cls.predict_proba(X_test)
print y_prob
                y_pred = [1 if t[1] >= 0.5 else 0 for t in y_prob]  # column 1 is P(class == 1)
cm = confusion_matrix(y_test, y_pred)
stats = classifyStats(cm, y_test, y_prob, len(y_test))
print stats['f1']
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
print cls_stats[cls_name]['accuracy']
if df is None or c == 1.0:
break
print | apache-2.0 |
zhoujh30/folium | folium/folium.py | 4 | 50182 | # -*- coding: utf-8 -*-
"""
Folium
-------
Make beautiful, interactive maps with Python and Leaflet.js
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import codecs
import functools
import json
from uuid import uuid4
from jinja2 import Environment, PackageLoader
from pkg_resources import resource_string
from folium import utilities
from folium.six import text_type, binary_type, iteritems
import sys
import base64
ENV = Environment(loader=PackageLoader('folium', 'templates'))
def initialize_notebook():
"""Initialize the IPython notebook display elements."""
try:
from IPython.core.display import display, HTML
except ImportError:
print("IPython Notebook could not be loaded.")
lib_css = ENV.get_template('ipynb_init_css.html')
lib_js = ENV.get_template('ipynb_init_js.html')
leaflet_dvf = ENV.get_template('leaflet-dvf.markers.min.js')
display(HTML(lib_css.render()))
display(HTML(lib_js.render({'leaflet_dvf': leaflet_dvf.render()})))
def iter_obj(type):
"""Decorator to keep count of different map object types in self.mk_cnt."""
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.mark_cnt[type] = self.mark_cnt.get(type, 0) + 1
func_result = func(self, *args, **kwargs)
return func_result
return wrapper
return decorator
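# For example, after three calls to a method decorated with @iter_obj('simple'),
# self.mark_cnt['simple'] == 3; the counters are used below to build unique
# JavaScript variable names such as marker_1, marker_2, ...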
class Map(object):
"""Create a Map with Folium."""
def __init__(self, location=None, width='100%', height='100%',
tiles='OpenStreetMap', API_key=None, max_zoom=18, min_zoom=1,
zoom_start=10, attr=None, min_lat=-90, max_lat=90,
min_lon=-180, max_lon=180):
"""Create a Map with Folium and Leaflet.js
Generate a base map of given width and height with either default
tilesets or a custom tileset URL. The following tilesets are built-in
to Folium. Pass any of the following to the "tiles" keyword:
- "OpenStreetMap"
- "MapQuest Open"
- "MapQuest Open Aerial"
- "Mapbox Bright" (Limited levels of zoom for free tiles)
- "Mapbox Control Room" (Limited levels of zoom for free tiles)
- "Stamen" (Terrain, Toner, and Watercolor)
- "Cloudmade" (Must pass API key)
- "Mapbox" (Must pass API key)
- "CartoDB" (positron and dark_matter)
You can pass a custom tileset to Folium by passing a Leaflet-style
URL to the tiles parameter:
http://{s}.yourtiles.com/{z}/{x}/{y}.png
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Map (Northing, Easting).
width: pixel int or percentage string (default: '100%')
Width of the map.
height: pixel int or percentage string (default: '100%')
Height of the map.
tiles: str, default 'OpenStreetMap'
Map tileset to use. Can use defaults or pass a custom URL.
API_key: str, default None
API key for Cloudmade or Mapbox tiles.
max_zoom: int, default 18
Maximum zoom depth for the map.
zoom_start: int, default 10
Initial zoom level for the map.
attr: string, default None
Map tile attribution; only required if passing custom tile URL.
Returns
-------
Folium Map Object
Examples
--------
>>>map = folium.Map(location=[45.523, -122.675], width=750, height=500)
>>>map = folium.Map(location=[45.523, -122.675],
tiles='Mapbox Control Room')
>>>map = folium.Map(location=(45.523, -122.675), max_zoom=20,
tiles='Cloudmade', API_key='YourKey')
>>>map = folium.Map(location=[45.523, -122.675], zoom_start=2,
tiles=('http://{s}.tiles.mapbox.com/v3/'
'mapbox.control-room/{z}/{x}/{y}.png'),
attr='Mapbox attribution')
"""
# Inits.
self.map_path = None
self.render_iframe = False
self.map_type = 'base'
self.map_id = '_'.join(['folium', uuid4().hex])
# Mark counter, JSON, Plugins.
self.mark_cnt = {}
self.json_data = {}
self.plugins = {}
# No location means we will use automatic bounds and ignore zoom
self.location = location
# If location is not passed, we center the map at 0,0
if not location:
location = [0, 0]
zoom_start = min_zoom
# Map Size Parameters.
try:
if isinstance(width, int):
width_type = 'px'
assert width > 0
else:
width_type = '%'
width = int(width.strip('%'))
assert 0 <= width <= 100
except:
msg = "Cannot parse width {!r} as {!r}".format
raise ValueError(msg(width, width_type))
self.width = width
try:
if isinstance(height, int):
height_type = 'px'
assert height > 0
else:
height_type = '%'
height = int(height.strip('%'))
assert 0 <= height <= 100
except:
msg = "Cannot parse height {!r} as {!r}".format
raise ValueError(msg(height, height_type))
self.height = height
self.map_size = {'width': width, 'height': height}
self._size = ('style="width: {0}{1}; height: {2}{3}"'
.format(width, width_type, height, height_type))
# Templates.
self.env = ENV
self.template_vars = dict(lat=location[0],
lon=location[1],
size=self._size,
max_zoom=max_zoom,
zoom_level=zoom_start,
map_id=self.map_id,
min_zoom=min_zoom,
min_lat=min_lat,
max_lat=max_lat,
min_lon=min_lon,
max_lon=max_lon)
# Tiles.
self.tiles = ''.join(tiles.lower().strip().split())
if self.tiles in ('cloudmade', 'mapbox') and not API_key:
raise ValueError('You must pass an API key if using Cloudmade'
' or non-default Mapbox tiles.')
self.default_tiles = ['openstreetmap', 'mapboxcontrolroom',
'mapquestopen', 'mapquestopenaerial',
'mapboxbright', 'mapbox', 'cloudmade',
'stamenterrain', 'stamentoner',
'stamenwatercolor',
'cartodbpositron', 'cartodbdark_matter']
self.tile_types = {}
for tile in self.default_tiles:
tile_path = 'tiles/%s' % tile
self.tile_types[tile] = {
'templ': self.env.get_template('%s/%s' % (tile_path,
'tiles.txt')),
'attr': self.env.get_template('%s/%s' % (tile_path,
'attr.txt')),
}
if self.tiles in self.tile_types:
self.template_vars['Tiles'] = (self.tile_types[self.tiles]['templ']
.render(API_key=API_key))
self.template_vars['attr'] = (self.tile_types[self.tiles]['attr']
.render())
else:
self.template_vars['Tiles'] = tiles
if not attr:
raise ValueError('Custom tiles must'
' also be passed an attribution')
if isinstance(attr, binary_type):
attr = text_type(attr, 'utf8')
self.template_vars['attr'] = attr
self.tile_types.update({'Custom': {'template': tiles,
'attr': attr}})
self.added_layers = []
self.template_vars.setdefault('wms_layers', [])
self.template_vars.setdefault('tile_layers', [])
self.template_vars.setdefault('image_layers', [])
@iter_obj('simple')
def add_tile_layer(self, tile_name=None, tile_url=None, active=False):
"""Adds a simple tile layer.
Parameters
----------
tile_name: string
name of the tile layer
tile_url: string
url location of the tile layer
active: boolean
should the layer be active when added
"""
if tile_name not in self.added_layers:
tile_name = tile_name.replace(" ", "_")
tile_temp = self.env.get_template('tile_layer.js')
tile = tile_temp.render({'tile_name': tile_name,
'tile_url': tile_url})
self.template_vars.setdefault('tile_layers', []).append((tile))
self.added_layers.append({tile_name: tile_url})
@iter_obj('simple')
def add_wms_layer(self, wms_name=None, wms_url=None, wms_format=None,
wms_layers=None, wms_transparent=True):
"""Adds a simple tile layer.
Parameters
----------
wms_name: string
name of wms layer
wms_url : string
url of wms layer
"""
if wms_name not in self.added_layers:
wms_name = wms_name.replace(" ", "_")
wms_temp = self.env.get_template('wms_layer.js')
wms = wms_temp.render({
'wms_name': wms_name,
'wms_url': wms_url,
'wms_format': wms_format,
'wms_layer_names': wms_layers,
'wms_transparent': str(wms_transparent).lower()})
self.template_vars.setdefault('wms_layers', []).append((wms))
self.added_layers.append({wms_name: wms_url})
@iter_obj('simple')
def add_layers_to_map(self):
"""
Required function to actually add the layers to the HTML packet.
"""
layers_temp = self.env.get_template('add_layers.js')
data_string = ''
for i, layer in enumerate(self.added_layers):
name = list(layer.keys())[0]
            if i < len(self.added_layers)-1:
                term_string = ",\n"
            else:
                term_string = "\n"
            data_string += '\"{}\": {}{}'.format(name, name, term_string)
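            # e.g. for two layers named layer_a and layer_b this builds
            # '"layer_a": layer_a,\n"layer_b": layer_b\n' for the add_layers.js template.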
data_layers = layers_temp.render({'layers': data_string})
self.template_vars.setdefault('data_layers', []).append((data_layers))
@iter_obj('simple')
def simple_marker(self, location=None, popup=None,
marker_color='blue', marker_icon='info-sign',
clustered_marker=False, icon_angle=0, popup_width=300):
"""Create a simple stock Leaflet marker on the map, with optional
popup text or Vincent visualization.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
popup: string or tuple, default 'Pop Text'
Input text or visualization for object. Can pass either text,
or a tuple of the form (Vincent object, 'vis_path.json')
It is possible to adjust the width of text/HTML popups
using the optional keywords `popup_width` (default is 300px).
marker_color
color of marker you want
marker_icon
icon from (http://getbootstrap.com/components/) you want on the
marker
clustered_marker
boolean of whether or not you want the marker clustered with
other markers
Returns
-------
Marker names and HTML in obj.template_vars
Example
-------
>>>map.simple_marker(location=[45.5, -122.3], popup='Portland, OR')
>>>map.simple_marker(location=[45.5, -122.3], popup=(vis, 'vis.json'))
"""
count = self.mark_cnt['simple']
mark_temp = self.env.get_template('simple_marker.js')
marker_num = 'marker_{0}'.format(count)
add_line = "{'icon':"+marker_num+"_icon}"
icon_temp = self.env.get_template('simple_icon.js')
icon = icon_temp.render({'icon': marker_icon,
'icon_name': marker_num+"_icon",
'markerColor': marker_color,
'icon_angle': icon_angle})
# Get marker and popup.
marker = mark_temp.render({'marker': 'marker_' + str(count),
'lat': location[0],
'lon': location[1],
'icon': add_line
})
popup_out = self._popup_render(popup=popup, mk_name='marker_',
count=count, width=popup_width)
if clustered_marker:
add_mark = 'clusteredmarkers.addLayer(marker_{0})'.format(count)
name = 'cluster_markers'
else:
add_mark = 'map.addLayer(marker_{0})'.format(count)
name = 'custom_markers'
append = (icon, marker, popup_out, add_mark)
self.template_vars.setdefault(name, []).append(append)
@iter_obj('div_mark')
def div_markers(self, locations=None, popups=None,
marker_size=10, popup_width=300):
"""Create a simple div marker on the map, with optional
popup text or Vincent visualization. Useful for marking points along a
line.
Parameters
----------
locations: list of locations, where each location is an array
Latitude and Longitude of Marker (Northing, Easting)
popup: list of popups, each popup should be a string or tuple.
Default 'Pop Text'
Input text or visualization for object. Can pass either text,
or a tuple of the form (Vincent object, 'vis_path.json')
It is possible to adjust the width of text/HTML popups
using the optional keywords `popup_width`.
(Leaflet default is 300px.)
marker_size
            default is 10
Returns
-------
Marker names and HTML in obj.template_vars
Example
-------
>>> map.div_markers(locations=[[37.421114, -122.128314],
... [37.391637, -122.085416],
... [37.388832, -122.087709]],
... popups=['1437494575531',
... '1437492135937',
... '1437493590434'])
"""
call_cnt = self.mark_cnt['div_mark']
if locations is None or popups is None:
raise RuntimeError("Both locations and popups are mandatory")
for (point_cnt, (location, popup)) in enumerate(zip(locations,
popups)):
marker_num = 'div_marker_{0}_{1}'.format(call_cnt, point_cnt)
icon_temp = self.env.get_template('static_div_icon.js')
icon_name = marker_num+"_icon"
icon = icon_temp.render({'icon_name': icon_name,
'size': marker_size})
mark_temp = self.env.get_template('simple_marker.js')
# Get marker and popup.
marker = mark_temp.render({'marker': marker_num,
'lat': location[0],
'lon': location[1],
'icon': "{'icon':"+icon_name+"}"
})
mk_name = 'div_marker_{0}_'.format(call_cnt)
popup_out = self._popup_render(popup=popup,
mk_name=mk_name,
count=point_cnt, width=popup_width)
add_mark = 'map.addLayer(div_marker_{0}_{1})'.format(call_cnt,
point_cnt)
append = (icon, marker, popup_out, add_mark)
self.template_vars.setdefault('div_markers', []).append(append)
@iter_obj('line')
def line(self, locations,
line_color=None, line_opacity=None, line_weight=None,
popup=None, popup_width=300):
"""Add a line to the map with optional styles.
Parameters
----------
locations: list of points (latitude, longitude)
Latitude and Longitude of line (Northing, Easting)
line_color: string, default Leaflet's default ('#03f')
line_opacity: float, default Leaflet's default (0.5)
line_weight: float, default Leaflet's default (5)
popup: string or tuple, default 'Pop Text'
Input text or visualization for object. Can pass either text,
or a tuple of the form (Vincent object, 'vis_path.json')
It is possible to adjust the width of text/HTML popups
using the optional keywords `popup_width` (default is 300px).
Note: If the optional styles are omitted, they will not be included
in the HTML output and will obtain the Leaflet defaults listed above.
Example
-------
>>>map.line(locations=[(45.5, -122.3), (42.3, -71.0)])
>>>map.line(locations=[(45.5, -122.3), (42.3, -71.0)],
line_color='red', line_opacity=1.0)
"""
count = self.mark_cnt['line']
line_temp = self.env.get_template('polyline.js')
polyline_opts = {'color': line_color, 'weight': line_weight,
'opacity': line_opacity}
varname = 'line_{}'.format(count)
line_rendered = line_temp.render({'line': varname,
'locations': locations,
'options': polyline_opts})
popup_out = self._popup_render(popup=popup, mk_name='line_',
count=count, width=popup_width)
add_line = 'map.addLayer({});'.format(varname)
append = (line_rendered, popup_out, add_line)
self.template_vars.setdefault('lines', []).append((append))
@iter_obj('multiline')
def multiline(self, locations, line_color=None, line_opacity=None,
line_weight=None):
"""Add a multiPolyline to the map with optional styles.
        A multiPolyline is a single layer that consists of several polylines that
share styling/popup.
Parameters
----------
locations: list of lists of points (latitude, longitude)
Latitude and Longitude of line (Northing, Easting)
line_color: string, default Leaflet's default ('#03f')
line_opacity: float, default Leaflet's default (0.5)
line_weight: float, default Leaflet's default (5)
Note: If the optional styles are omitted, they will not be included
in the HTML output and will obtain the Leaflet defaults listed above.
Example
-------
# FIXME: Add another example.
>>> m.multiline(locations=[[(45.5236, -122.675), (45.5236, -122.675)],
[(45.5237, -122.675), (45.5237, -122.675)],
[(45.5238, -122.675), (45.5238, -122.675)]])
>>> m.multiline(locations=[[(45.5236, -122.675), (45.5236, -122.675)],
[(45.5237, -122.675), (45.5237, -122.675)],
[(45.5238, -122.675), (45.5238, -122.675)]],
line_color='red', line_weight=2,
line_opacity=1.0)
"""
count = self.mark_cnt['multiline']
multiline_temp = self.env.get_template('multi_polyline.js')
multiline_opts = {'color': line_color, 'weight': line_weight,
'opacity': line_opacity}
varname = 'multiline_{}'.format(count)
multiline_rendered = multiline_temp.render({'multiline': varname,
'locations': locations,
'options': multiline_opts})
add_multiline = 'map.addLayer({});'.format(varname)
append = (multiline_rendered, add_multiline)
self.template_vars.setdefault('multilines', []).append(append)
@iter_obj('circle')
def circle_marker(self, location=None, radius=500, popup=None,
line_color='black', fill_color='black',
fill_opacity=0.6, popup_width=300):
"""Create a simple circle marker on the map, with optional popup text
or Vincent visualization.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
radius: int, default 500
Circle radius, in pixels
popup: string or tuple, default 'Pop Text'
Input text or visualization for object. Can pass either text,
or a tuple of the form (Vincent object, 'vis_path.json')
It is possible to adjust the width of text/HTML popups
using the optional keywords `popup_width` (default is 300px).
line_color: string, default black
Line color. Can pass hex value here as well.
fill_color: string, default black
Fill color. Can pass hex value here as well.
fill_opacity: float, default 0.6
Circle fill opacity
Returns
-------
Circle names and HTML in obj.template_vars
Example
-------
>>>map.circle_marker(location=[45.5, -122.3],
radius=1000, popup='Portland, OR')
>>>map.circle_marker(location=[45.5, -122.3],
radius=1000, popup=(bar_chart, 'bar_data.json'))
"""
count = self.mark_cnt['circle']
circle_temp = self.env.get_template('circle_marker.js')
circle = circle_temp.render({'circle': 'circle_' + str(count),
'radius': radius,
'lat': location[0], 'lon': location[1],
'line_color': line_color,
'fill_color': fill_color,
'fill_opacity': fill_opacity})
popup_out = self._popup_render(popup=popup, mk_name='circle_',
count=count, width=popup_width)
add_mark = 'map.addLayer(circle_{0})'.format(count)
self.template_vars.setdefault('markers', []).append((circle,
popup_out,
add_mark))
@iter_obj('polygon')
def polygon_marker(self, location=None, line_color='black', line_opacity=1,
line_weight=2, fill_color='blue', fill_opacity=1,
num_sides=4, rotation=0, radius=15, popup=None,
popup_width=300):
"""Custom markers using the Leaflet Data Vis Framework.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
line_color: string, default 'black'
Marker line color
line_opacity: float, default 1
Line opacity, scale 0-1
line_weight: int, default 2
Stroke weight in pixels
fill_color: string, default 'blue'
Marker fill color
fill_opacity: float, default 1
Marker fill opacity
num_sides: int, default 4
Number of polygon sides
rotation: int, default 0
Rotation angle in degrees
radius: int, default 15
Marker radius, in pixels
popup: string or tuple, default 'Pop Text'
Input text or visualization for object. Can pass either text,
or a tuple of the form (Vincent object, 'vis_path.json')
It is possible to adjust the width of text/HTML popups
using the optional keywords `popup_width` (default is 300px).
Returns
-------
Polygon marker names and HTML in obj.template_vars
"""
count = self.mark_cnt['polygon']
poly_temp = self.env.get_template('poly_marker.js')
polygon = poly_temp.render({'marker': 'polygon_' + str(count),
'lat': location[0],
'lon': location[1],
'line_color': line_color,
'line_opacity': line_opacity,
'line_weight': line_weight,
'fill_color': fill_color,
'fill_opacity': fill_opacity,
'num_sides': num_sides,
'rotation': rotation,
'radius': radius})
popup_out = self._popup_render(popup=popup, mk_name='polygon_',
count=count, width=popup_width)
add_mark = 'map.addLayer(polygon_{0})'.format(count)
self.template_vars.setdefault('markers', []).append((polygon,
popup_out,
add_mark))
# Update JS/CSS and other Plugin files.
js_temp = self.env.get_template('dvf_js_ref.txt').render()
self.template_vars.update({'dvf_js': js_temp})
polygon_js = resource_string('folium',
'plugins/leaflet-dvf.markers.min.js')
self.plugins.update({'leaflet-dvf.markers.min.js': polygon_js})
def lat_lng_popover(self):
"""Enable popovers to display Lat and Lon on each click."""
latlng_temp = self.env.get_template('lat_lng_popover.js')
self.template_vars.update({'lat_lng_pop': latlng_temp.render()})
def click_for_marker(self, popup=None):
"""Enable the addition of markers via clicking on the map. The marker
popup defaults to Lat/Lon, but custom text can be passed via the
popup parameter. Double click markers to remove them.
Parameters
----------
popup:
Custom popup text
Example
-------
>>>map.click_for_marker(popup='Your Custom Text')
"""
latlng = '"Latitude: " + lat + "<br>Longitude: " + lng '
click_temp = self.env.get_template('click_for_marker.js')
if popup:
popup_txt = ''.join(['"', popup, '"'])
else:
popup_txt = latlng
click_str = click_temp.render({'popup': popup_txt})
self.template_vars.update({'click_pop': click_str})
def fit_bounds(self, bounds, padding_top_left=None,
padding_bottom_right=None, padding=None, max_zoom=None):
"""Fit the map to contain a bounding box with the maximum zoom level possible.
Parameters
----------
bounds: list of (latitude, longitude) points
Bounding box specified as two points [southwest, northeast]
padding_top_left: (x, y) point, default None
Padding in the top left corner. Useful if some elements in
the corner, such as controls, might obscure objects you're zooming
to.
padding_bottom_right: (x, y) point, default None
Padding in the bottom right corner.
padding: (x, y) point, default None
Equivalent to setting both top left and bottom right padding to
the same value.
max_zoom: int, default None
Maximum zoom to be used.
Example
-------
>>> map.fit_bounds([[52.193636, -2.221575], [52.636878, -1.139759]])
"""
options = {
'paddingTopLeft': padding_top_left,
'paddingBottomRight': padding_bottom_right,
'padding': padding,
'maxZoom': max_zoom,
}
fit_bounds_options = {}
for key, opt in options.items():
if opt:
fit_bounds_options[key] = opt
fit_bounds = self.env.get_template('fit_bounds.js')
fit_bounds_str = fit_bounds.render({
'bounds': json.dumps(bounds),
'fit_bounds_options': json.dumps(fit_bounds_options,
sort_keys=True),
})
self.template_vars.update({'fit_bounds': fit_bounds_str})
def add_plugin(self, plugin):
"""Adds a plugin to the map.
Parameters
----------
plugin: folium.plugins object
A plugin to be added to the map. It has to implement the
methods `render_html`, `render_css` and `render_js`.
"""
plugin.add_to_map(self)
def _auto_bounds(self):
if 'fit_bounds' in self.template_vars:
return
# Get count for each feature type
ft_names = ["marker", "line", "circle", "polygon", "multiline"]
ft_names = [i for i in ft_names if i in self.mark_cnt]
# Make a comprehensive list of all the features we want to fit
feat_str = ["{name}_{count}".format(name=ft_name,
count=self.mark_cnt[ft_name])
for ft_name in ft_names for
count in range(1, self.mark_cnt[ft_name]+1)]
feat_str = "[" + ', '.join(feat_str) + "]"
fit_bounds = self.env.get_template('fit_bounds.js')
fit_bounds_str = fit_bounds.render({
'autobounds': not self.location,
'features': feat_str,
'fit_bounds_options': json.dumps({'padding': [30, 30]}),
})
self.template_vars.update({'fit_bounds': fit_bounds_str.strip()})
def _popup_render(self, popup=None, mk_name=None, count=None,
width=300):
"""Popup renderer: either text or Vincent/Vega.
Parameters
----------
popup: str or Vincent tuple, default None
String for text popup, or tuple of (Vincent object, json_path)
mk_name: str, default None
Type of marker. Simple, Circle, etc.
count: int, default None
Count of marker
"""
if not popup:
return ''
else:
if sys.version_info >= (3, 0):
utype, stype = str, bytes
else:
utype, stype = unicode, str
if isinstance(popup, (utype, stype)):
popup_temp = self.env.get_template('simple_popup.js')
if isinstance(popup, utype):
popup_txt = popup.encode('ascii', 'xmlcharrefreplace')
else:
popup_txt = popup
if sys.version_info >= (3, 0):
popup_txt = popup_txt.decode()
pop_txt = json.dumps(str(popup_txt))
return popup_temp.render({'pop_name': mk_name + str(count),
'pop_txt': pop_txt, 'width': width})
elif isinstance(popup, tuple):
# Update template with JS libs.
vega_temp = self.env.get_template('vega_ref.txt').render()
jquery_temp = self.env.get_template('jquery_ref.txt').render()
d3_temp = self.env.get_template('d3_ref.txt').render()
vega_parse = self.env.get_template('vega_parse.js').render()
self.template_vars.update({'vega': vega_temp,
'd3': d3_temp,
'jquery': jquery_temp,
'vega_parse': vega_parse})
# Parameters for Vega template.
vega = popup[0]
mark = ''.join([mk_name, str(count)])
json_out = popup[1]
div_id = popup[1].split('.')[0]
width = vega.width
height = vega.height
if isinstance(vega.padding, dict):
width += vega.padding['left']+vega.padding['right']
height += vega.padding['top']+vega.padding['bottom']
else:
width += 75
height += 50
max_width = max([self.map_size['width'], width])
vega_id = '#' + div_id
popup_temp = self.env.get_template('vega_marker.js')
return popup_temp.render({'mark': mark, 'div_id': div_id,
'width': width, 'height': height,
'max_width': max_width,
'json_out': json_out,
'vega_id': vega_id})
else:
raise TypeError("Unrecognized popup type: {!r}".format(popup))
@iter_obj('geojson')
def geo_json(self, geo_path=None, geo_str=None, data_out='data.json',
data=None, columns=None, key_on=None, threshold_scale=None,
fill_color='blue', fill_opacity=0.6, line_color='black',
line_weight=1, line_opacity=1, legend_name=None,
topojson=None, reset=False):
"""Apply a GeoJSON overlay to the map.
Plot a GeoJSON overlay on the base map. There is no requirement
to bind data (passing just a GeoJSON plots a single-color overlay),
but there is a data binding option to map your columnar data to
different feature objects with a color scale.
If data is passed as a Pandas dataframe, the "columns" and "key-on"
keywords must be included, the first to indicate which DataFrame
columns to use, the second to indicate the layer in the GeoJSON
on which to key the data. The 'columns' keyword does not need to be
passed for a Pandas series.
Colors are generated from color brewer (http://colorbrewer2.org/)
sequential palettes on a D3 threshold scale. The scale defaults to the
following quantiles: [0, 0.5, 0.75, 0.85, 0.9]. A custom scale can be
passed to `threshold_scale` of length <=6, in order to match the
color brewer range.
TopoJSONs can be passed as "geo_path", but the "topojson" keyword must
also be passed with the reference to the topojson objects to convert.
See the topojson.feature method in the TopoJSON API reference:
https://github.com/mbostock/topojson/wiki/API-Reference
Parameters
----------
geo_path: string, default None
URL or File path to your GeoJSON data
geo_str: string, default None
String of GeoJSON, alternative to geo_path
data_out: string, default 'data.json'
Path to write Pandas DataFrame/Series to JSON if binding data
data: Pandas DataFrame or Series, default None
Data to bind to the GeoJSON.
columns: dict or tuple, default None
If the data is a Pandas DataFrame, the columns of data to be bound.
Must pass column 1 as the key, and column 2 the values.
key_on: string, default None
Variable in the GeoJSON file to bind the data to. Must always
            start with 'feature' and be in JavaScript object notation.
Ex: 'feature.id' or 'feature.properties.statename'.
threshold_scale: list, default None
Data range for D3 threshold scale. Defaults to the following range
of quantiles: [0, 0.5, 0.75, 0.85, 0.9], rounded to the nearest
order-of-magnitude integer. Ex: 270 rounds to 200, 5600 to 6000.
fill_color: string, default 'blue'
Area fill color. Can pass a hex code, color name, or if you are
binding data, one of the following color brewer palettes:
'BuGn', 'BuPu', 'GnBu', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'RdPu',
'YlGn', 'YlGnBu', 'YlOrBr', and 'YlOrRd'.
fill_opacity: float, default 0.6
Area fill opacity, range 0-1.
line_color: string, default 'black'
GeoJSON geopath line color.
line_weight: int, default 1
GeoJSON geopath line weight.
line_opacity: float, default 1
GeoJSON geopath line opacity, range 0-1.
legend_name: string, default None
Title for data legend. If not passed, defaults to columns[1].
topojson: string, default None
If using a TopoJSON, passing "objects.yourfeature" to the topojson
keyword argument will enable conversion to GeoJSON.
reset: boolean, default False
Remove all current geoJSON layers, start with new layer
Output
------
GeoJSON data layer in obj.template_vars
Example
-------
>>> m.geo_json(geo_path='us-states.json', line_color='blue',
line_weight=3)
>>> m.geo_json(geo_path='geo.json', data=df,
columns=['Data 1', 'Data 2'],
key_on='feature.properties.myvalue', fill_color='PuBu',
threshold_scale=[0, 20, 30, 40, 50, 60])
>>> m.geo_json(geo_path='countries.json', topojson='objects.countries')
"""
if reset:
reset_vars = ['json_paths', 'func_vars', 'color_scales',
'geo_styles', 'gjson_layers', 'map_legends',
'topo_convert']
for var in reset_vars:
self.template_vars.update({var: []})
self.mark_cnt['geojson'] = 1
def json_style(style_cnt, line_color, line_weight, line_opacity,
fill_color, fill_opacity, quant_fill):
"""Generate JSON styling function from template"""
style_temp = self.env.get_template('geojson_style.js')
style = style_temp.render({'style': style_cnt,
'line_color': line_color,
'line_weight': line_weight,
'line_opacity': line_opacity,
'fill_color': fill_color,
'fill_opacity': fill_opacity,
'quantize_fill': quant_fill})
return style
# Set map type to geojson.
self.map_type = 'geojson'
# Get JSON map layer template pieces, convert TopoJSON if necessary.
# geo_str is really a hack.
if geo_path:
geo_path = ".defer(d3.json, '{0}')".format(geo_path)
elif geo_str:
fmt = (".defer(function(callback)"
"{{callback(null, JSON.parse('{}'))}})").format
geo_path = fmt(geo_str)
if topojson is None:
map_var = '_'.join(['gjson', str(self.mark_cnt['geojson'])])
layer_var = map_var
else:
map_var = '_'.join(['tjson', str(self.mark_cnt['geojson'])])
topo_obj = '.'.join([map_var, topojson])
layer_var = '_'.join(['topo', str(self.mark_cnt['geojson'])])
topo_templ = self.env.get_template('topo_func.js')
topo_func = topo_templ.render({'map_var': layer_var,
't_var': map_var,
't_var_obj': topo_obj})
topo_lib = self.env.get_template('topojson_ref.txt').render()
self.template_vars.update({'topojson': topo_lib})
self.template_vars.setdefault('topo_convert',
[]).append(topo_func)
style_count = '_'.join(['style', str(self.mark_cnt['geojson'])])
# Get Data binding pieces if available.
if data is not None:
import pandas as pd
# Create DataFrame with only the relevant columns.
if isinstance(data, pd.DataFrame):
data = pd.concat([data[columns[0]], data[columns[1]]], axis=1)
# Save data to JSON.
self.json_data[data_out] = utilities.transform_data(data)
# Add data to queue.
d_path = ".defer(d3.json, '{0}')".format(data_out)
self.template_vars.setdefault('json_paths', []).append(d_path)
# Add data variable to makeMap function.
data_var = '_'.join(['data', str(self.mark_cnt['geojson'])])
self.template_vars.setdefault('func_vars', []).append(data_var)
# D3 Color scale.
series = data[columns[1]]
            if threshold_scale and len(threshold_scale) > 6:
                raise ValueError('The threshold scale must have length <= 6 '
                                 'to match the color brewer range')
domain = threshold_scale or utilities.split_six(series=series)
if len(domain) > 253:
raise ValueError('The threshold scale must be length <= 253')
if not utilities.color_brewer(fill_color):
                raise ValueError('Please pass a valid color brewer code to '
                                 'fill_color. See docstring for valid codes.')
palette = utilities.color_brewer(fill_color, len(domain))
d3range = palette[0: len(domain) + 1]
tick_labels = utilities.legend_scaler(domain)
color_temp = self.env.get_template('d3_threshold.js')
d3scale = color_temp.render({'domain': domain,
'range': d3range})
self.template_vars.setdefault('color_scales', []).append(d3scale)
# Create legend.
name = legend_name or columns[1]
leg_templ = self.env.get_template('d3_map_legend.js')
legend = leg_templ.render({'lin_max': int(domain[-1]*1.1),
'tick_labels': tick_labels,
'caption': name})
self.template_vars.setdefault('map_legends', []).append(legend)
# Style with color brewer colors.
matchColor = 'color(matchKey({0}, {1}))'.format(key_on, data_var)
style = json_style(style_count, line_color, line_weight,
line_opacity, None, fill_opacity, matchColor)
else:
style = json_style(style_count, line_color, line_weight,
line_opacity, fill_color, fill_opacity, None)
layer = ('gJson_layer_{0} = L.geoJson({1}, {{style: {2},'
'onEachFeature: onEachFeature}}).addTo(map)'
.format(self.mark_cnt['geojson'], layer_var, style_count))
self.template_vars.setdefault('json_paths', []).append(geo_path)
self.template_vars.setdefault('func_vars', []).append(map_var)
self.template_vars.setdefault('geo_styles', []).append(style)
self.template_vars.setdefault('gjson_layers', []).append(layer)
@iter_obj('image_overlay')
def image_overlay(self, data, opacity=0.25, min_lat=-90.0, max_lat=90.0,
min_lon=-180.0, max_lon=180.0, image_name=None,
filename=None):
"""
Simple image overlay of raster data from a numpy array. This is a
lightweight way to overlay geospatial data on top of a map. If your
data is high res, consider implementing a WMS server and adding a WMS
layer.
This function works by generating a PNG file from a numpy array. If
you do not specify a filename, it will embed the image inline.
Otherwise, it saves the file in the current directory, and then adds
it as an image overlay layer in leaflet.js. By default, the image is
placed and stretched using bounds that cover the entire globe.
Parameters
----------
data: numpy array OR url string, required.
if numpy array, must be a image format,
i.e., NxM (mono), NxMx3 (rgb), or NxMx4 (rgba)
if url, must be a valid url to a image (local or external)
opacity: float, default 0.25
Image layer opacity in range 0 (transparent) to 1 (opaque)
min_lat: float, default -90.0
max_lat: float, default 90.0
min_lon: float, default -180.0
max_lon: float, default 180.0
image_name: string, default None
The name of the layer object in leaflet.js
filename: string, default None
Optional file name of output.png for image overlay.
Use `None` for inline PNG.
Output
------
Image overlay data layer in obj.template_vars
Examples
-------
# assumes a map object `m` has been created
>>> import numpy as np
>>> data = np.random.random((100,100))
# to make a rgba from a specific matplotlib colormap:
>>> import matplotlib.cm as cm
        >>> cmapper = cm.ScalarMappable(cmap='jet')
        >>> data2 = cmapper.to_rgba(np.random.random((100,100)))
>>> # Place the data over all of the globe (will be pretty pixelated!)
>>> m.image_overlay(data)
>>> # Put it only over a single city (Paris).
>>> m.image_overlay(data, min_lat=48.80418, max_lat=48.90970,
... min_lon=2.25214, max_lon=2.44731)
"""
if isinstance(data, str):
filename = data
else:
try:
png_str = utilities.write_png(data)
except Exception as e:
raise e
if filename is not None:
with open(filename, 'wb') as fd:
fd.write(png_str)
else:
png = "data:image/png;base64,{}".format
filename = png(base64.b64encode(png_str).decode('utf-8'))
if image_name not in self.added_layers:
if image_name is None:
image_name = "Image_Overlay"
else:
image_name = image_name.replace(" ", "_")
image_url = filename
image_bounds = [[min_lat, min_lon], [max_lat, max_lon]]
image_opacity = opacity
image_temp = self.env.get_template('image_layer.js')
image = image_temp.render({'image_name': image_name,
'image_url': image_url,
'image_bounds': image_bounds,
'image_opacity': image_opacity})
self.template_vars['image_layers'].append(image)
self.added_layers.append(image_name)
    def _build_map(self, html_templ=None, templ_type='string'):
        """Build HTML/JS/CSS from Templates given current map type."""
        self._auto_bounds()
if html_templ is None:
map_types = {'base': 'fol_template.html',
'geojson': 'geojson_template.html'}
# Check current map type.
type_temp = map_types[self.map_type]
html_templ = self.env.get_template(type_temp)
else:
if templ_type == 'string':
html_templ = self.env.from_string(html_templ)
self.HTML = html_templ.render(self.template_vars, plugins=self.plugins)
def create_map(self, path='map.html', plugin_data_out=True, template=None):
"""Write Map output to HTML and data output to JSON if available.
Parameters:
-----------
path: string, default 'map.html'
Path for HTML output for map
plugin_data_out: boolean, default True
If using plugins such as awesome markers, write all plugin
data such as JS/CSS/images to path
template: string, default None
Custom template to render
"""
self.map_path = path
self._build_map(template)
with codecs.open(path, 'w', 'utf8') as f:
f.write(self.HTML)
if self.json_data:
for path, data in iteritems(self.json_data):
with open(path, 'w') as g:
json.dump(data, g)
if self.plugins and plugin_data_out:
for name, plugin in iteritems(self.plugins):
with open(name, 'w') as f:
if isinstance(plugin, binary_type):
plugin = text_type(plugin, 'utf8')
f.write(plugin)
def _repr_html_(self):
"""Build the HTML representation for IPython."""
map_types = {'base': 'ipynb_repr.html',
'geojson': 'ipynb_iframe.html'}
# Check current map type.
type_temp = map_types[self.map_type]
if self.render_iframe:
type_temp = 'ipynb_iframe.html'
templ = self.env.get_template(type_temp)
self._build_map(html_templ=templ, templ_type='temp')
if self.map_type == 'geojson' or self.render_iframe:
if not self.map_path:
raise ValueError('Use create_map to set the path!')
return templ.render(path=self.map_path, width=self.width,
height=self.height)
return self.HTML
def display(self):
"""Display the visualization inline in the IPython notebook.
This is deprecated, use the following instead::
from IPython.display import display
display(viz)
"""
from IPython.core.display import display, HTML
display(HTML(self._repr_html_()))
| mit |
Engensmax/Truss-Builder | truss_builder.py | 1 | 22286 | from scipy import optimize
from matplotlib import pyplot
from evaluation import objective_function
import pickle
import os
import subprocess
pyplot.ion()
# STARTS OBJECTIVE FUNCTION DEPENDING ON THE VARIABLES TO OPTIMIZE
def optimizer(x, input1, input2):
# Apparently the optimization toolbox from scipy turns scalar x inputs into lists with 1 element.
if len(x) == 1:
input1[options['optimization_variables']] = x[0]
else:
input1[options['optimization_variables']] = x
return objective_function(input1, input2)
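# scipy.optimize.minimize calls this wrapper as optimizer(x, inputs, options): x is the current
# design vector and (inputs, options) is the extra args tuple (see the optimize.minimize call
# below); the wrapper writes x back into inputs[options['optimization_variables']] before
# evaluating the truss.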
# PLOTS x VS y AND SAVES THE PLOT AS A PICTURE
def plot_output(pickle_file_path, x, y, saving_path):
p_output = pickle.load(open(pickle_file_path, 'rb'))
x_axis = list()
y_axis = list()
for results in p_output:
x_axis.append(results[str(x)])
y_axis.append(results[str(y)])
pyplot.plot(x_axis, y_axis)
pyplot.xlabel(x)
pyplot.ylabel(y)
pyplot.title(saving_path)
pyplot.grid(True)
pyplot.savefig(saving_path)
subprocess.Popen(saving_path, shell=True)
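# Hypothetical usage (file and column names are placeholders, not taken from this script):
# plot_output('outputs/optim_output_cubes_pickle', 'Strut_Thickness', 'Sigma_z',
#             'outputs/thickness_vs_sigma_z.png')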
# OPENS THE CSV
def open_csv(output_file_path):
subprocess.Popen(str(output_file_path), shell=True)
inputs = dict()
bounds = dict()
options = dict()
########################################################################################################################
########################################################################################################################
# ####################### INPUT FROM USER ###################### #
########################################################################################################################
########################################################################################################################
# SMALL LIBRARIES
# Contains the number of different thicknesses on each cell:
truss_thicknesses_library = dict(cubes=3, body_centered_cubes=7, truncated_cubes=9, varying_truncated_cubes=9,
face_diagonal_cubes=6, face_diagonal_cubes_alt=6, octetrahedrons=6, octahedrons=6,
void_octetrahedrons=6, diamonds=4, templar_crosses=1, templar_alt_crosses=1,
templar_alt2_crosses=1, pyramids=3, file_super_truss=1, tetroctas=20,
truncated_octahedrons=6)
# Contains the number of different ratios on each cell (for topology optimization):
truss_ratio_library = dict(cubes=0, body_centered_cubes=0, truncated_cubes=0, varying_truncated_cubes=1,
face_diagonal_cubes=0, face_diagonal_cubes_alt=0, octetrahedrons=0, octahedrons=0,
void_octetrahedrons=0, diamonds=0, templar_crosses=3, templar_alt_crosses=3,
templar_alt2_crosses=3, pyramids=1, file_super_truss=0, tetroctas=0, truncated_octahedrons=1)
# Dictionary of materials: E_Modulus: [N/mm^2] = [MPa]
material_library = dict(MED610=dict(name='MED610', E_Modulus=2.5e3, poisson_ratio=0.33), # Med-grade Polymer
PLA=dict(name='PLA', E_Modulus=2.5e3, poisson_ratio=0.33), # Poly-lactic acid
TCP=dict(name='TCP', E_Modulus=22e3, poisson_ratio=0.33), # Tricalcium Phosphate
HA=dict(name='HA', E_Modulus=6e3, poisson_ratio=0.33), # Hydroxyapatite
Titanium=dict(name='Titanium', E_Modulus=116e3, poisson_ratio=0.32),
Relative=dict(name='Relative', E_Modulus=100, poisson_ratio=0.33)
)
########################################################################################################################
# INPUTS
# Directory where abaqus loads and saves files. The directory will be created if it doesn't exist already.
# This generated data is rather large:
inputs['calculating_directory'] = "C://abaqus_temp/"
# Directory where outputs such as csv and pickle get saved. The directory will be created if it doesn't exist already.
# This generated data is very small:
inputs['output_directory'] = 'C://Users/maxe/Dropbox/Master_Thesis/outputs/final/Titanium_bone/'
# Affix to every file in the calculating directory:
inputs['job_name'] = "temp_output"
# Name (and therefore topology) of the truss. Multiple names can be put in to iterate through all.
# For a single input, put it in brackets. For example: ['cubes']
# inputs['truss_names'] = ['cubes'] # , 'diamonds', 'truncated_cubes']
# inputs['truss_names'] = ['truncated_cubes', 'face_diagonal_cubes_alt',
# 'octetrahedrons', 'octahedrons', 'void_octetrahedrons', 'diamonds', 'pyramids',
# 'truncated_octahedrons']
inputs['truss_names'] = ['cubes', 'body_centered_cubes', 'truncated_cubes', 'face_diagonal_cubes_alt',
'octetrahedrons', 'octahedrons', 'void_octetrahedrons', 'diamonds', 'pyramids',
'truncated_octahedrons']
# Material. See material_library to add or see materials.
inputs['material'] = material_library['Titanium']
# Cell topology
# Number of cells in one direction (Total number of cells is number_of_cells ** 2):
inputs['number_of_cells'] = 3
# Length of one cell (Total size is cell_size * number_of_cells):
inputs['cell_size'] = 1.5 # [mm]
# Minimal Thickness that can be used:
inputs['strut_min_thickness'] = 0.1 # [mm]
# A list will be created inputs['strut_thickness_multiplicator'] that has the needed length depending on cell topoology
# This lists elements will all be inputs['strut_min_thickness'] * inputs['strut_thickness_multiplier']
inputs['strut_thickness_multiplier'] = 5
inputs['cell_ratio_multiplier'] = 0.5
########################################################################################################################
########################################################################################################################
# OPTIONS
# Creates the loading steps for the wireframe evaluation:
options['create_steps'] = True
# Submits the job and evaluates the created steps:
options['submit_job'] = True
# Generates the solid model of the truss and exports it into an stl-file. Needed to calculate volumetric outputs:
options['stl_generate'] = False
# Cuts the borders of the truss off:
options['cutoff'] = True
# Runs the program with the GUI (Generated User Interface). This blocks submitting of the job:
options['gui'] = False
# Overwrites the csv result_file instead of appending the new results
# The result gets saved in the output_directory
# If there hasn't been an output file yet, this needs to be True:
options['overwrite_csv'] = True
# Overwrites the pickled result file instead of appending the new results
# The result gets saved in the output_directory
# If there hasn't been an output file yet, this needs to be True:
options['overwrite_pickle'] = True
# Viewers
# Opens the stl (STereoLithography) after completion:
options['stl_view'] = False
# Opens the odb (Output DataBase) after completion:
options['odb_view'] = False
# Opens the csv (Comma Separated Values) of the output after completion:
options['csv_view'] = False
# Cross Section Input
# Cross Section of the Struts for the Solid and Stl Model. Can be "square", "hexagon", "octagon" or "dodecagon":
options['strut_cross_section'] = 'octagon'
# Version Input
# To determine Version: use windows command prompt: "abaqus cae nogui" and then ">>> version" or ">>> print(version)"
# If the student version is used add SE in the end. F.e. '6.14-2SE'
# This is used for defining the path of where to find the abaqus_plugin stlExport. See class Script : __init__()
options['abaqus_version'] = '6.14-1'
# File Path
# This is usually C:/SIMULIA/Abaqus
options['abaqus_path'] = "C:/Program Files/Abaqus/"
########################################################################################################################
# METHOD to run the engine. Possible entries are 'single_run', 'loop' or 'optimization'
options['method'] = 'optimization'
# LOOP specific (applies for options['method'] == 'loop')
# Loop over first variable:
options['loop1_variable'] = 'strut_min_thickness'
# List of all the values to evaluate
options['loop1_values'] = [0.05 * a for a in range(1, 21)]
# Optional loop over second variable:
# Use 'None' if there is to be no loop
options['loop2_variable'] = 'None'
# Use [0] if there is to be no loop
options['loop2_values'] = [0]
# OPTIMIZATION specific
# Decides what to optimize for:
options['optimization_variables'] = 'strut_thickness_multiplicator'
# Bounds of the optimization variables. This only applies to some algorithms.
options['bounds'] = (1, 10)
# Define Fitness variables, their target value, their norming and their weighting.
# options['fitness_variables'] = {'Sigma_z': [30e3, 1e-2, 1]}
options['fitness_variables'] = {'Sigma_x': [12e3, 1e-2, 1],
'Sigma_y': [12e3, 1e-2, 1],
'Sigma_z': [15e3, 1e-2, 1]}
# Plot the fitness while optimizing:
options['plot_fitness'] = True
# Defines the algorithm used for the optimization:
options['algorithm'] = 'L-BFGS-B'
# Possible algorithms:
# 'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP', 'dogleg', 'trust-ncg'
# Options for the optimization. See scipy optimization toolbox for further info:
options['options'] = {'disp': True, 'eps': 0.01, 'ftol': 0.05}
# Reruns the results with generating stl and writing all results into one csv:
options['run_results'] = True
########################################################################################################################
# OUTPUT
# Decide what to save in the csv
options['output'] = dict()
# General:
options['output']['Step'] = True
options['output']['Truss_Name'] = True
options['output']['Fitness'] = True
# Geometric properties:
options['output']['Cell_size'] = True
options['output']['Strut_Thickness'] = True # =Strut_Thicknesses[0]
options['output']['Pore_size'] = True
# Mechanic properties:
options['output']["Young's Modulus"] = True
options['output']['Shearing Modulus'] = True
options['output']["Poisson's Ratio"] = True
# Volumetric properties: (will only be calculated if the STL is generated)
options['output']['Volume'] = True
options['output']['Porosity'] = True
options['output']['Void_ratio'] = True
options['output']['Surface_Area'] = True
# Optimization properties:
options['output']['X'] = True # Strut_Thickness_Multiplicator
########################################################################################################################
########################################################################################################################
# ##################### NO MORE INPUT ######################## #
########################################################################################################################
########################################################################################################################
if options['gui']:
options['submit_job'] = False
print("Job will not be submitted if the Script is run with the GUI.")
if not options['stl_generate']:
options['stl_view'] = False
print("Stl cannot be viewed if it is not generated")
if not options['submit_job']:
options['odb_view'] = False
print("Odb cannot be viewed if the job is not submitted")
options['read_output'] = options['submit_job']
# Create folders if they don't exist yet.
if not os.path.exists(inputs['calculating_directory']):
os.mkdir(inputs['calculating_directory'])
if not os.path.exists(inputs['output_directory']):
os.mkdir(inputs['output_directory'])
if options['method'] == 'optimization':
# Initialize csv
file = open(inputs['output_directory'] + "optimization_results.csv", 'w')
file.write("x, fun, nit, success, nfev\n")
file.close()
# Initialize pickle file
pickle.dump(list(), open(inputs['output_directory'] + "pickled_results", 'wb'))
# Loop over the different truss topologies:
for name in inputs['truss_names']:
inputs['truss_name'] = name
bounds[options['optimization_variables']] = [options['bounds']]
inputs['strut_thickness_multiplicator'] = list()
# Multiplied by strut_min_thickness results in the actual thickness of the struts
bounds['strut_thickness_multiplicator'] = list()
# Multiplied by strut_min_thickness results in the actual thickness of the struts bounds
for i in range(0, truss_thicknesses_library[name]):
inputs['strut_thickness_multiplicator'].append(inputs['strut_thickness_multiplier'])
bounds['strut_thickness_multiplicator'].append(options['bounds'])
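    # e.g. for 'cubes' (3 strut thicknesses in truss_thicknesses_library) with
    # strut_thickness_multiplier = 5 and options['bounds'] = (1, 10), this yields
    # strut_thickness_multiplicator = [5, 5, 5] and bounds = [(1, 10), (1, 10), (1, 10)].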
# Best thicknesses for titanium (1.5mm cell_size)(0.1mm strut_min_thickness) (3x3x3)
# inputs['strut_thickness_multiplicator'] = [4.71177218, 4.14965115, 4.80241845]
inputs['cell_ratio'] = list()
bounds['cell_ratio'] = list()
for j in range(0, truss_ratio_library[name]):
inputs['cell_ratio'].append(inputs['cell_ratio_multiplier'])
bounds['cell_ratio'].append(options['bounds'])
# Best ratio for templar_alt2_crosses:
# inputs['cell_ratio'] = [0.3, 0.8, 0.2]
inputs['output_file'] = inputs['output_directory'] + 'optim_output_' + str(name)
if options['overwrite_csv']:
result_file = open(str(inputs['output_file']) + '.csv', 'w')
if options['output']['Step']:
result_file.write('Step, ')
if options['output']['Truss_Name']:
result_file.write('Truss Name, ')
if options['output']['Fitness']:
result_file.write('Fitness, ')
if options['output']['Cell_size']:
result_file.write('Cell Size [mm], ')
if options['output']['Strut_Thickness']:
result_file.write('Strut Thickness [mu-m], ')
if options['output']['Pore_size']:
result_file.write('Pore Size 1[mu-m], Pore Size 2[mu-m], Pore Size 3[mu-m], Pore Size 4[mu-m], ')
if options['output']["Young's Modulus"]:
result_file.write('Sigma_z [MPa], Sigma_y [MPa], Sigma_x [MPa], ')
if options['output']['Shearing Modulus']:
result_file.write('Tau_yz [MPa], Tau_xz [MPa], Tau_xy [MPa], ')
if options['output']["Poisson's Ratio"]:
result_file.write('v21 [1], v31 [1], v32 [1], ')
if options['output']['Volume']:
result_file.write('Volume [mm^3], ')
if options['output']['Porosity']:
result_file.write('Porosity [%], ')
if options['output']['Void_ratio']:
result_file.write('Void_ratio [1], ')
if options['output']['Surface_Area']:
result_file.write('Surface_Area [mm^2], ')
if options['output']['X']:
result_file.write('Strut_Thickness_Multiplicator [1], ')
result_file.write('\n')
result_file.close()
if options['run_results']:
result_file2 = open(inputs['output_directory'] + "best_results" + '.csv', 'w')
if options['output']['Step']:
result_file2.write('Step, ')
if options['output']['Truss_Name']:
result_file2.write('Truss Name, ')
if options['output']['Fitness']:
result_file2.write('Fitness, ')
if options['output']['Cell_size']:
result_file2.write('Cell Size [mm], ')
if options['output']['Strut_Thickness']:
result_file2.write('Strut Thickness [mu-m], ')
if options['output']['Pore_size']:
result_file2.write('Pore Size 1[mu-m], Pore Size 2[mu-m], Pore Size 3[mu-m], Pore Size 4[mu-m], ')
if options['output']["Young's Modulus"]:
result_file2.write('Sigma_z [MPa], Sigma_y [MPa], Sigma_x [MPa], ')
if options['output']['Shearing Modulus']:
result_file2.write('Tau_yz [MPa], Tau_xz [MPa], Tau_xy [MPa], ')
if options['output']["Poisson's Ratio"]:
result_file2.write('v21 [1], v31 [1], v32 [1], ')
if options['output']['Volume']:
result_file2.write('Volume [mm^3], ')
if options['output']['Porosity']:
result_file2.write('Porosity [%], ')
if options['output']['Void_ratio']:
result_file2.write('Void_ratio [1], ')
if options['output']['Surface_Area']:
result_file2.write('Surface_Area [mm^2], ')
if options['output']['X']:
result_file2.write('Strut_Thickness_Multiplicator [1], ')
result_file2.write('\n')
result_file2.close()
if options['overwrite_pickle']:
output_old = list()
file = open(inputs['output_file'] + "_pickle", 'wb')
pickle.dump(output_old, file)
file.close()
# Counts the number of function evaluations. Careful, this is global
universal_counter = 0
####################################################################################################################
# Calling the engine, depending on the chosen method
if options['method'] == 'single_run':
options['plot_fitness'] = False
objective_function(inputs=inputs, options=options)
elif options['method'] == 'loop':
options['plot_fitness'] = False
for loop1 in options['loop1_values']:
for loop2 in options['loop2_values']:
inputs[options['loop1_variable']] = loop1
inputs[options['loop2_variable']] = loop2
objective_function(inputs=inputs, options=options)
elif options['method'] == 'optimization':
print("Optimization Input:")
print("xo= " + str(inputs[options['optimization_variables']]))
print("inputs= " + str(inputs) + ", " + str(options))
print("options= " + str(options))
print("method= " + str(options['algorithm']))
print("bounds= " + str(bounds[options['optimization_variables']]))
print("options= " + str(options['options']))
print("#################################################################################################\n\n\n")
result = optimize.minimize(optimizer,
x0=inputs[options['optimization_variables']],
args=(inputs,
options),
method=options['algorithm'],
bounds=bounds[options['optimization_variables']],
options=options['options'])
# Save result as CSV:
file = open(inputs['output_directory'] + "optimization_results.csv", 'a')
key_list = ['x', 'fun', 'nit', 'success', 'nfev']
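# These keys are standard fields of the scipy.optimize.OptimizeResult object:
# solution vector, objective value, iteration count, success flag and number
# of objective-function evaluations ('nit' may be absent for some algorithms).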
for key in key_list:
file.write(str(result[key]) + ", ")
file.write("\n")
file.close()
# Save result as pickled file:
result_pickle = {'inputs': inputs, 'options': options,
'optimization': {'x': result['x'], 'nfev': result['nfev'],
'nit': result['nit'], 'success': result['success']}}
print(result_pickle['optimization'])
output_old = pickle.load(open(inputs['output_directory'] + "pickled_results", 'rb'))
output_old.append(result_pickle)
file = open(inputs['output_directory'] + "pickled_results", 'wb')
pickle.dump(output_old, file)
file.close()
print("#################################################################################################\n\n\n")
else:
print("options['method'] does not contain a valid keyword. possible entries are: "
"'single_run', 'loop' or 'optimization'.")
####################################################################################################################
if options['csv_view']:
open_csv(str(inputs['output_file']) + '.csv')
if options['method'] == 'optimization' and options['run_results']:
output = pickle.load(open(inputs['output_directory'] + "pickled_results", 'rb'))
for i in range(0, len(output)):
inputs = output[i]['inputs']
options = output[i]['options']
inputs[options['optimization_variables']] = output[i]['optimization']['x']
inputs['output_file'] = inputs['output_directory'] + "best_results"
options['stl_generate'] = True
objective_function(inputs=inputs, options=options)
| mit |
exepulveda/swfc | python/spatial_clustering_kmean_cds4.py | 1 | 1257 | import numpy as np
import pickle
import logging
import argparse
import csv
import matplotlib as mpl
mpl.use('agg')
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from cluster_utils import create_clusters_dict
from plotting import scatter_clusters
import matplotlib.pyplot as plt
if __name__ == "__main__":
filename = "ds4"
X = np.loadtxt("../../data/{dataset}.csv".format(dataset=filename),skiprows=1,delimiter=",")
locations = X[:,0:2]
values = X[:,2:6] #0,1,2,5 are real; 3 and 4 are cats
true_clusters = X[:,6]
n,p = values.shape
nclusters = 4
numerical = [0,1,2,3]
data = values[:,numerical]
standardize = StandardScaler()
data = standardize.fit_transform(data)
clustering = KMeans(n_clusters=nclusters)
kmeans_clusters = clustering.fit_predict(data)
pca = PCA(n_components=2,whiten=True)
pca_X = pca.fit_transform(data)
clustering_pca = KMeans(n_clusters=nclusters)
clusters_pca = clustering_pca.fit_predict(pca_X)
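# Note: clustering the whitened PCA scores is not equivalent to clustering the
# standardized features -- whitening rescales each principal component to unit
# variance, which changes the Euclidean distances that KMeans minimizes.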
#save data
new_data = np.c_[X,pca_X,kmeans_clusters,clusters_pca]
np.savetxt("../../data/{dataset}_clusters.csv".format(dataset=filename),new_data,delimiter=",",fmt="%.4f")
| gpl-3.0 |
waterponey/scikit-learn | sklearn/mixture/tests/test_gmm.py | 44 | 20880 | # Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file, please remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
import unittest
import copy
import sys
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import (assert_true, assert_greater,
assert_raise_message, assert_warns_message,
ignore_warnings)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.gmm._sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.gmm._sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.gmm._sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
x = mixture.gmm._sample_gaussian(
[0, 0], [[4, 3], [1, .1]], covariance_type='full', random_state=42)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, spherecv, 'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
with ignore_warnings(category=DeprecationWarning):
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
with ignore_warnings(category=DeprecationWarning):
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
with ignore_warnings(category=DeprecationWarning):
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
with ignore_warnings(category=DeprecationWarning):
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
with ignore_warnings(category=DeprecationWarning):
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
with ignore_warnings(category=DeprecationWarning):
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.dpgmm._DPGMMBase):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def score(self, g, X):
with ignore_warnings(category=DeprecationWarning):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_multiple_init():
# Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
with ignore_warnings(category=DeprecationWarning):
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_n_parameters():
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
with ignore_warnings(category=DeprecationWarning):
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
with ignore_warnings(category=DeprecationWarning):
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
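# AIC = 2*k - 2*log(L); for (approximately) standard normal data the expected
# negative log-likelihood per sample and per dimension is ~SGH, so the
# formulas below approximate -2*log(L) by 2 * n_samples * n_dim * SGH.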
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed to trigger a ``LinAlgError`` exception
when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
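def _illustrate_covariance_formulas():
    # Illustrative sketch (not part of the original test suite): numerically
    # compares the two covariance update formulas from the docstring of
    # check_positive_definite_covars. Both are mathematically equivalent, but
    # the first form can lose positive definiteness to round-off error when
    # the mean is large relative to the spread of the data.
    demo_rng = np.random.RandomState(0)
    x = demo_rng.randn(1000, 2) + 1e4       # large offset stresses round-off
    w = np.full(len(x), 1.0 / len(x))       # equal weights summing to one
    mu = np.dot(w, x)
    c_naive = np.dot(x.T, w[:, None] * x) - np.outer(mu, mu)
    c_centered = np.dot((x - mu).T, w[:, None] * (x - mu))
    return c_naive, c_centered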
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
bird-house/birdhouse-workshop | tutorials/03_plotter_cli/plotter.py | 1 | 2728 | import matplotlib
# no X11 server ... must be run first
# https://github.com/matplotlib/matplotlib/issues/3466/
matplotlib.use('Agg')
import matplotlib.pylab as plt
# import ccrs for map projections
import cartopy.crs as ccrs
from netCDF4 import Dataset
import os
DATADIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', 'data')
AIR_DS = os.path.join(DATADIR, 'air.mon.ltm.nc')
def simple_plot(resource, variable=None, timestep=0, output='plot.png'):
"""
Generates a nice and simple plot.
"""
print("Plotting {}, timestep {} ...".format(resource, timestep))
# Create dataset from resource ... a local NetCDF file or a remote OpenDAP URL
ds = Dataset(resource)
# Get the values of the given variable
values = ds.variables[variable]
# Prepare plot with a given size
fig = plt.figure(figsize=(20, 10))
# add projection
ax = plt.axes(projection=ccrs.PlateCarree())
# Render a contour plot for the timestep
plt.contourf(values[timestep, :, :])
# add background image with coastlines
ax.stock_img()
# ax.set_global()
ax.coastlines()
# add a colorbar
plt.colorbar()
# Save the plot to filesystem
fig.savefig(output)
plt.close()
print("Plot written to {}".format(output))
return output
def test_simple_plot():
# raise NotImplementedError("This test is not implemented yet. Help wanted!")
# run default test
output = simple_plot(resource=AIR_DS, variable='air')
assert output == 'plot.png'
# try an invalid variable
try:
simple_plot(resource=AIR_DS, variable='water')
except KeyError:
pass
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Generates a nice and simple plot from a NetCDF file.')
parser.add_argument('dataset', nargs=1, default=AIR_DS,
help='a NetCDF file or an OpenDAP URL')
parser.add_argument('-V', '--variable', nargs='?', default='air',
help='variable to plot (default: air)')
# TODO: add an optional timestep parameter
# parser.add_argument('-t', '--timestep', nargs='?', default=0, type=int,
# help='timestep to plot (default: 0)')
# TODO: add an optional output parameter for the output filename
args = parser.parse_args()
print("dataset={0.dataset}, variable={0.variable}".format(args))
output = simple_plot(resource=args.dataset[0], variable=args.variable)
# TODO: run simple_plot with timestep parameter
# output = simple_plot(resource=args.dataset[0], variable=args.variable,
# timestep=args.timestep)
print("Output: {}".format(output))
| apache-2.0 |
dlebauer/plantcv | scripts/image_analysis/nir_sv/nir_sv_z2500.py | 2 | 9181 | #!/usr/bin/python
# This is a computational pipeline which uses the plantcv module to sharpen, filter and analyze NIR images
# Pipeline designed for use with Setaria plants at zoom X2500
# The strategy/methodology is adapted from the textbook "Digital Image Processing" by Gonzalez and Woods
# Version 0.9 Max Feldman 7.29.14
import argparse
import scipy
from scipy import ndimage
import sys, os, traceback
import cv2
import numpy as np
from random import randrange
import pygtk
import matplotlib
if not os.getenv('DISPLAY'):
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import cm as cm
from Bio.Statistics.lowess import lowess
import plantcv as pcv
def options():
parser = argparse.ArgumentParser(description="Imaging processing with opencv")
parser.add_argument("-i", "--image", help="Input image file.", required=True)
parser.add_argument("-m", "--roi", help="Input region of interest file.", required=False)
parser.add_argument("-o", "--outdir", help="Output directory for image files.", required=False)
parser.add_argument("-D", "--debug", help="Turn on debug, prints intermediate images.", action="store_true")
args = parser.parse_args()
return args
### Main pipeline
def main():
# Get options
args = options()
if args.debug:
print("Analyzing your image dude...")
# Read image
device = 0
img = cv2.imread(args.image, flags=0)
path, img_name = os.path.split(args.image)
# Read in image which is average of average of backgrounds
img_bkgrd = cv2.imread("bkgrd_ave_z2500.png", flags=0)
# NIR images for burnin2 are upside down. This may be fixed in later experiments
img = ndimage.rotate(img, 180)
img_bkgrd = ndimage.rotate(img_bkgrd, 180)
# Subtract the image from the image background to make the plant more prominent
device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device, args.debug)
if args.debug:
pcv.plot_hist(bkg_sub_img, 'bkg_sub_img')
device, bkg_sub_thres_img = pcv.binary_threshold(bkg_sub_img, 145, 255, 'dark', device, args.debug)
bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 30, 220)
if args.debug:
cv2.imwrite('bkgrd_sub_thres.png', bkg_sub_thres_img)
#device, bkg_sub_thres_img = pcv.binary_threshold_2_sided(img_bkgrd, 50, 190, device, args.debug)
# if a region of interest is specified read it in
roi = cv2.imread(args.roi)
# Start by examining the distribution of pixel intensity values
if args.debug:
pcv.plot_hist(img, 'hist_img')
# Will intensity transformation enhance your ability to isolate the object of interest by thresholding?
device, he_img = pcv.HistEqualization(img, device, args.debug)
if args.debug:
pcv.plot_hist(he_img, 'hist_img_he')
# Laplace filtering (identify edges based on 2nd derivative)
device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
if args.debug:
pcv.plot_hist(lp_img, 'hist_lp')
# Lapacian image sharpening, this step will enhance the darkness of the edges detected
device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
if args.debug:
pcv.plot_hist(lp_shrp_img, 'hist_lp_shrp')
# Sobel filtering
# 1st derivative sobel filtering along horizontal axis, kernel = 1, unscaled)
device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
if args.debug:
pcv.plot_hist(sbx_img, 'hist_sbx')
# 1st derivative sobel filtering along vertical axis, kernel = 1, unscaled)
device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug)
if args.debug:
pcv.plot_hist(sby_img, 'hist_sby')
# Combine the effects of both x and y filters through matrix addition
# This will capture edges identified within each plane and emphasize edges found in both images
device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
if args.debug:
pcv.plot_hist(sb_img, 'hist_sb_comb_img')
# Use a lowpass (blurring) filter to smooth sobel image
device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)
# combine the smoothed sobel image with the laplacian sharpened image
# combines the best features of both methods as described in "Digital Image Processing" by Gonzalez and Woods pg. 169
device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug)
if args.debug:
pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img')
# Perform thresholding to generate a binary image
device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, 'dark', device, args.debug)
# Prepare a few small kernels for morphological filtering
kern = np.zeros((3,3), dtype=np.uint8)
kern1 = np.copy(kern)
kern1[1,1:3]=1
kern2 = np.copy(kern)
kern2[1,0:2]=1
kern3 = np.copy(kern)
kern3[0:2,1]=1
kern4 = np.copy(kern)
kern4[1:3,1]=1
# Prepare a larger kernel for dilation
kern[1,0:3]=1
kern[0:3,1]=1
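# For reference, the four erosion kernels are 2-pixel line segments pointing
# right, left, up and down from the center pixel, and the dilation kernel is
# a 3x3 plus/cross shape:
# kern1         kern2         kern3         kern4         kern
# [[0,0,0],     [[0,0,0],     [[0,1,0],     [[0,0,0],     [[0,1,0],
#  [0,1,1],      [1,1,0],      [0,1,0],      [0,1,0],      [1,1,1],
#  [0,0,0]]      [0,0,0]]      [0,0,0]]      [0,1,0]]      [0,1,0]]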
# Perform erosion with 4 small kernels
device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug)
device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug)
device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug)
device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug)
# Combine eroded images
device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)
# Perform dilation
# device, dil_img = pcv.dilate(c1234_img, kern, 1, device, args.debug)
device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device, args.debug)
# Get masked image
# The dilated image may contain some pixels which are not plant
device, masked_erd = pcv.apply_mask(img, comb_img, 'black', device, args.debug)
# device, masked_erd_dil = pcv.apply_mask(img, dil_img, 'black', device, args.debug)
# Need to remove the edges of the image, we did that by generating a set of rectangles to mask the edges
# img is (254 X 320)
# mask for the bottom of the image
device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (120,184), (215,252), device, args.debug)
# mask for the left side of the image
device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (1,1), (85,252), device, args.debug)
# mask for the right side of the image
device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (240,1), (318,252), device, args.debug)
# mask the edges
device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(img, (1,1), (318,252), device, args.debug)
# combine boxes to filter the edges and car out of the photo
device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)
# Make a ROI around the plant, include connected objects
# Apply the box mask to the image
# device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)
device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img, 'black', device, args.debug)
device, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(img, (120,75), (200,184), device, args.debug)
plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi_contour, roi_hierarchy, plant_objects, plant_hierarchy, device, args.debug)
# Apply the box mask to the image
# device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)
device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black', device, args.debug)
rgb = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
# Generate a binary to send to the analysis function
device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug)
mask3d = np.copy(mask)
plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)
### Analysis ###
device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(img, args.image, mask, 256, device, args.debug, args.outdir + '/' + img_name)
device, shape_header, shape_data, ori_img = pcv.analyze_object(rgb, args.image, o, m, device, args.debug, args.outdir + '/' + img_name)
pcv.print_results(args.image, hist_header, hist_data)
pcv.print_results(args.image, shape_header, shape_data)
if __name__ == '__main__':
main() | gpl-2.0 |
shaneknapp/spark | python/pyspark/sql/pandas/serializers.py | 23 | 12308 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Serializers for PyArrow and pandas conversions. See `pyspark.serializers` for more details.
"""
from pyspark.serializers import Serializer, read_int, write_int, UTF8Deserializer
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class ArrowCollectSerializer(Serializer):
"""
Deserialize a stream of batches followed by batch order information. Used in
PandasConversionMixin._collect_as_arrow() after invoking Dataset.collectAsArrowToPython()
in the JVM.
"""
def __init__(self):
self.serializer = ArrowStreamSerializer()
def dump_stream(self, iterator, stream):
return self.serializer.dump_stream(iterator, stream)
def load_stream(self, stream):
"""
Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
a list of indices that can be used to put the RecordBatches in the correct order.
"""
# load the batches
for batch in self.serializer.load_stream(stream):
yield batch
# load the batch order indices or propagate any error that occurred in the JVM
num = read_int(stream)
if num == -1:
error_msg = UTF8Deserializer().loads(stream)
raise RuntimeError("An error occurred while calling "
"ArrowCollectSerializer.load_stream: {}".format(error_msg))
batch_order = []
for i in range(num):
index = read_int(stream)
batch_order.append(index)
yield batch_order
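# Consumer-side sketch (illustrative, not part of this class): the caller
# typically drains the generator and uses the final yielded list to restore
# the original batch order, e.g.
#   results = list(serializer.load_stream(stream))
#   batches, batch_order = results[:-1], results[-1]
#   ordered_batches = [batches[i] for i in batch_order]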
def __repr__(self):
return "ArrowCollectSerializer(%s)" % self.serializer
class ArrowStreamSerializer(Serializer):
"""
Serializes Arrow record batches as a stream.
"""
def dump_stream(self, iterator, stream):
import pyarrow as pa
writer = None
try:
for batch in iterator:
if writer is None:
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
import pyarrow as pa
reader = pa.ipc.open_stream(stream)
for batch in reader:
yield batch
def __repr__(self):
return "ArrowStreamSerializer"
class ArrowStreamPandasSerializer(ArrowStreamSerializer):
"""
Serializes Pandas.Series as Arrow data with Arrow streaming format.
Parameters
----------
timezone : str
A timezone to respect when handling timestamp values
safecheck : bool
If True, conversion from Arrow to Pandas checks for overflow/truncation
assign_cols_by_name : bool
If True, then Pandas DataFrames will get columns by name
"""
def __init__(self, timezone, safecheck, assign_cols_by_name):
super(ArrowStreamPandasSerializer, self).__init__()
self._timezone = timezone
self._safecheck = safecheck
self._assign_cols_by_name = assign_cols_by_name
def arrow_to_pandas(self, arrow_column):
from pyspark.sql.pandas.types import _check_series_localize_timestamps, \
_convert_map_items_to_dict
import pyarrow
# If the given column is a date type column, creates a series of datetime.date directly
# instead of creating datetime64[ns] as intermediate data to avoid overflow caused by
# datetime64[ns] type handling.
s = arrow_column.to_pandas(date_as_object=True)
if pyarrow.types.is_timestamp(arrow_column.type):
return _check_series_localize_timestamps(s, self._timezone)
elif pyarrow.types.is_map(arrow_column.type):
return _convert_map_items_to_dict(s)
else:
return s
def _create_batch(self, series):
"""
Create an Arrow record batch from the given pandas.Series or list of Series,
with optional type.
Parameters
----------
series : pandas.Series or list
A single series, list of series, or list of (series, arrow_type)
Returns
-------
pyarrow.RecordBatch
Arrow RecordBatch
"""
import pandas as pd
import pyarrow as pa
from pyspark.sql.pandas.types import _check_series_convert_timestamps_internal, \
_convert_dict_to_map_items
from pandas.api.types import is_categorical_dtype
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
def create_array(s, t):
mask = s.isnull()
# Ensure timestamp series are in expected form for Spark internal representation
if t is not None and pa.types.is_timestamp(t):
s = _check_series_convert_timestamps_internal(s, self._timezone)
elif t is not None and pa.types.is_map(t):
s = _convert_dict_to_map_items(s)
elif is_categorical_dtype(s.dtype):
# Note: This can be removed once minimum pyarrow version is >= 0.16.1
s = s.astype(s.dtypes.categories.dtype)
try:
array = pa.Array.from_pandas(s, mask=mask, type=t, safe=self._safecheck)
except ValueError as e:
if self._safecheck:
error_msg = "Exception thrown when converting pandas.Series (%s) to " + \
"Arrow Array (%s). It can be caused by overflows or other " + \
"unsafe conversions warned by Arrow. Arrow safe type check " + \
"can be disabled by using SQL config " + \
"`spark.sql.execution.pandas.convertToArrowArraySafely`."
raise ValueError(error_msg % (s.dtype, t)) from e
else:
raise e
return array
arrs = []
for s, t in series:
if t is not None and pa.types.is_struct(t):
if not isinstance(s, pd.DataFrame):
raise ValueError("A field of type StructType expects a pandas.DataFrame, "
"but got: %s" % str(type(s)))
# Input partition and result pandas.DataFrame empty, make empty Arrays with struct
if len(s) == 0 and len(s.columns) == 0:
arrs_names = [(pa.array([], type=field.type), field.name) for field in t]
# Assign result columns by schema name if user labeled with strings
elif self._assign_cols_by_name and any(isinstance(name, str)
for name in s.columns):
arrs_names = [(create_array(s[field.name], field.type), field.name)
for field in t]
# Assign result columns by position
else:
arrs_names = [(create_array(s[s.columns[i]], field.type), field.name)
for i, field in enumerate(t)]
struct_arrs, struct_names = zip(*arrs_names)
arrs.append(pa.StructArray.from_arrays(struct_arrs, struct_names))
else:
arrs.append(create_array(s, t))
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in range(len(arrs))])
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
batches = (self._create_batch(series) for series in iterator)
super(ArrowStreamPandasSerializer, self).dump_stream(batches, stream)
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
batches = super(ArrowStreamPandasSerializer, self).load_stream(stream)
import pyarrow as pa
for batch in batches:
yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class ArrowStreamPandasUDFSerializer(ArrowStreamPandasSerializer):
"""
Serializer used by Python worker to evaluate Pandas UDFs
"""
def __init__(self, timezone, safecheck, assign_cols_by_name, df_for_struct=False):
super(ArrowStreamPandasUDFSerializer, self) \
.__init__(timezone, safecheck, assign_cols_by_name)
self._df_for_struct = df_for_struct
def arrow_to_pandas(self, arrow_column):
import pyarrow.types as types
if self._df_for_struct and types.is_struct(arrow_column.type):
import pandas as pd
series = [super(ArrowStreamPandasUDFSerializer, self).arrow_to_pandas(column)
.rename(field.name)
for column, field in zip(arrow_column.flatten(), arrow_column.type)]
s = pd.concat(series, axis=1)
else:
s = super(ArrowStreamPandasUDFSerializer, self).arrow_to_pandas(arrow_column)
return s
def dump_stream(self, iterator, stream):
"""
Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent.
This should be sent after creating the first record batch so in case of an error, it can
be sent back to the JVM before the Arrow stream starts.
"""
def init_stream_yield_batches():
should_write_start_length = True
for series in iterator:
batch = self._create_batch(series)
if should_write_start_length:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
should_write_start_length = False
yield batch
return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
def __repr__(self):
return "ArrowStreamPandasUDFSerializer"
class CogroupUDFSerializer(ArrowStreamPandasUDFSerializer):
def load_stream(self, stream):
"""
Deserialize Cogrouped ArrowRecordBatches to a tuple of Arrow tables and yield as two
lists of pandas.Series.
"""
import pyarrow as pa
dataframes_in_group = None
while dataframes_in_group is None or dataframes_in_group > 0:
dataframes_in_group = read_int(stream)
if dataframes_in_group == 2:
batch1 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]
batch2 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]
yield (
[self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch1).itercolumns()],
[self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch2).itercolumns()]
)
elif dataframes_in_group != 0:
raise ValueError(
'Invalid number of pandas.DataFrames in group {0}'.format(dataframes_in_group))
| apache-2.0 |
Winterflower/dx | dx/dx_plot.py | 2 | 4168 | #
# DX Analytics
# Helper Function for Plotting
# dx_plot.py
#
# DX Analytics is a financial analytics library, mainly for
# derviatives modeling and pricing by Monte Carlo simulation
#
# (c) Dr. Yves J. Hilpisch
# The Python Quants GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import cm
def plot_option_stats(s_list, pv, de, ve):
''' Plot option prices, deltas and vegas for a set of
different initial values of the underlying.
Parameters
==========
s_list : array or list
set of intial values of the underlying
pv : array or list
present values
de : array of list
results for deltas
ve : array of list
results for vega
'''
plt.figure(figsize=(9, 7))
sub1 = plt.subplot(311)
plt.plot(s_list, pv, 'ro', label='Present Value')
plt.plot(s_list, pv, 'b')
plt.grid(True); plt.legend(loc=0)
plt.setp(sub1.get_xticklabels(), visible=False)
sub2 = plt.subplot(312)
plt.plot(s_list, de, 'go', label='Delta')
plt.plot(s_list, de, 'b')
plt.grid(True); plt.legend(loc=0)
plt.setp(sub2.get_xticklabels(), visible=False)
sub3 = plt.subplot(313)
plt.plot(s_list, ve, 'yo', label='Vega')
plt.plot(s_list, ve, 'b')
plt.xlabel('Initial Value of Underlying')
plt.grid(True); plt.legend(loc=0)
def plot_greeks_3d(inputs, labels):
''' Plot Greeks in 3d.
Parameters
==========
inputs : list of arrays
x, y, z arrays
labels : list of strings
labels for x, y, z
'''
x, y, z = inputs
xl, yl, zl = labels
fig = plt.figure(figsize=(10, 7))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1,
cmap=cm.coolwarm, linewidth=0.5, antialiased=True)
ax.set_xlabel(xl)
ax.set_ylabel(yl)
ax.set_zlabel(zl)
fig.colorbar(surf, shrink=0.5, aspect=5)
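def _example_plot_greeks_3d():
    ''' Minimal usage sketch (illustrative only): plot_greeks_3d expects three
    2-d arrays of equal shape, e.g. strike/maturity grids from np.meshgrid and
    a Greek surface evaluated on that grid. The vega-like surface below is
    synthetic dummy data, not the output of a DX Analytics valuation.
    '''
    strikes = np.linspace(80., 120., 25)
    maturities = np.linspace(0.5, 2.0, 20)
    K, T = np.meshgrid(strikes, maturities)
    V = 0.4 * np.sqrt(T) * np.exp(-((K - 100.) / 20.) ** 2)
    plot_greeks_3d([K, T, V], ['strike', 'maturity', 'vega'])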
def plot_calibration_results(cali, relative=False):
''' Plot calibration results.
Parameters
==========
cali : instance of calibration class
instance has to have opt_parameters
relative : boolean
if True, then relative error reporting
if False, absolute error reporting
'''
cali.update_model_values()
mats = set(cali.option_data[:, 0])
mats = np.sort(list(mats))
fig, axarr = plt.subplots(len(mats), 2, sharex=True)
fig.set_size_inches(8, 12)
fig.subplots_adjust(wspace=0.2, hspace=0.2)
z = 0
for T in mats:
strikes = cali.option_data[cali.option_data[:, 0] == T][:, 1]
market = cali.option_data[cali.option_data[:, 0] == T][:, 2]
model = cali.model_values[cali.model_values[:, 0] == T][:, 2]
axarr[z, 0].set_ylabel('%s' % str(T)[:10])
axarr[z, 0].plot(strikes, market, label='Market Quotes')
axarr[z, 0].plot(strikes, model, 'ro', label='Model Prices')
axarr[z, 0].grid()
if T is mats[0]:
axarr[z, 0].set_title('Option Quotes')
if T is mats[-1]:
axarr[z, 0].set_xlabel('Strike')
wi = 2.
if relative is True:
axarr[z, 1].bar(strikes - wi / 2,
(model - market) / market * 100, width=wi)
else:
axarr[z, 1].bar(strikes - wi / 2, model - market, width=wi)
axarr[z, 1].grid()
if T is mats[0]:
axarr[z, 1].set_title('Differences')
if T is mats[-1]:
axarr[z, 1].set_xlabel('Strike')
z += 1 | agpl-3.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/frame/test_analytics.py | 3 | 76384 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import timedelta
from distutils.version import LooseVersion
import sys
import pytest
from string import ascii_lowercase
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas.compat import lrange, product
from pandas import (compat, isnull, notnull, DataFrame, Series,
MultiIndex, date_range, Timestamp)
import pandas as pd
import pandas.core.nanops as nanops
import pandas.core.algorithms as algorithms
import pandas.io.formats.printing as printing
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameAnalytics(TestData):
# ---------------------------------------------------------------------
# Correlation and covariance
def test_corr_pearson(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('pearson')
def test_corr_kendall(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('kendall')
def test_corr_spearman(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('spearman')
def _check_method(self, method='pearson', check_minp=False):
if not check_minp:
correls = self.frame.corr(method=method)
exp = self.frame['A'].corr(self.frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], exp)
else:
result = self.frame.corr(min_periods=len(self.frame) - 8)
expected = self.frame.corr()
expected.loc['A', 'B'] = expected.loc['B', 'A'] = nan
tm.assert_frame_equal(result, expected)
def test_corr_non_numeric(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
# exclude non-numeric types
result = self.mixed_frame.corr()
expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
def test_corr_nooverlap(self):
tm._skip_if_no_scipy()
# nothing in common
for meth in ['pearson', 'kendall', 'spearman']:
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isnull(rs.loc['A', 'B'])
assert isnull(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isnull(rs.loc['C', 'C'])
def test_corr_constant(self):
tm._skip_if_no_scipy()
# constant --> all NA
for meth in ['pearson', 'spearman']:
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isnull(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
# it works!
df3.cov()
df3.corr()
def test_corr_int_and_boolean(self):
tm._skip_if_no_scipy()
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
tm.assert_frame_equal(df.corr(meth), expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_cov(self):
# min_periods no NAs (corner case)
expected = self.frame.cov()
result = self.frame.cov(min_periods=len(self.frame))
tm.assert_frame_equal(expected, result)
result = self.frame.cov(min_periods=len(self.frame) + 1)
assert isnull(result.values).all()
# with NAs
frame = self.frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
result = self.frame.cov(min_periods=len(self.frame) - 8)
expected = self.frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
self.frame['A'][:5] = nan
self.frame['B'][:10] = nan
cov = self.frame.cov()
tm.assert_almost_equal(cov['A']['C'],
self.frame['A'].cov(self.frame['C']))
# exclude non-numeric types
result = self.mixed_frame.cov()
expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self):
a = self.tsframe
noise = Series(randn(len(a)), index=a.index)
b = self.tsframe.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(randn(5, 4), index=index, columns=columns)
df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self):
result = self.tsframe.corrwith(self.tsframe['A'])
expected = self.tsframe.apply(self.tsframe['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
res = df.describe()
tm.assert_frame_equal(res, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(res) == exp_repr
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f,
has_skipna=False,
has_numeric_only=True,
check_dtype=False,
check_dates=True)
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH #423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_nunique(self):
f = lambda s: len(algorithms.unique1d(s.dropna()))
self._check_stat_op('nunique', f, has_skipna=False,
check_dtype=False, check_dates=True)
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
def test_sum(self):
self._check_stat_op('sum', np.sum, has_numeric_only=True)
# mixed types (with upcasting happening)
self._check_stat_op('sum', np.sum,
frame=self.mixed_float.astype('float32'),
has_numeric_only=True, check_dtype=False,
check_less_precise=True)
def test_stat_operators_attempt_obj_array(self):
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
dtype='O')
methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']
# GH #676
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
for meth in methods:
assert df.values.dtype == np.object_
result = getattr(df, meth)(1)
expected = getattr(df.astype('f8'), meth)(1)
if not tm._incompat_bottleneck_version(meth):
tm.assert_series_equal(result, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean, check_dates=True)
def test_product(self):
self._check_stat_op('product', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, check_dates=True)
def test_min(self):
self._check_stat_op('min', np.min, check_dates=True)
self._check_stat_op('min', np.min, frame=self.intframe)
def test_cummin(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cummin = self.tsframe.cummin()
expected = self.tsframe.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = self.tsframe.cummin(axis=1)
expected = self.tsframe.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = self.tsframe.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(self.tsframe)
def test_cummax(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cummax = self.tsframe.cummax()
expected = self.tsframe.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = self.tsframe.cummax(axis=1)
expected = self.tsframe.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = self.tsframe.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(self.tsframe)
def test_max(self):
self._check_stat_op('max', np.max, check_dates=True)
self._check_stat_op('max', np.max, frame=self.intframe)
def test_mad(self):
f = lambda x: np.abs(x - x.mean()).mean()
self._check_stat_op('mad', f)
def test_var_std(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
result = self.tsframe.std(ddof=4)
expected = self.tsframe.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = self.tsframe.var(ddof=4)
expected = self.tsframe.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
if nanops._USE_BOTTLENECK:
nanops._USE_BOTTLENECK = False
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
nanops._USE_BOTTLENECK = True
def test_numeric_only_flag(self):
# GH #9201
methods = ['sem', 'var', 'std']
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
for meth in methods:
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
pytest.raises(TypeError, lambda: getattr(df1, meth)(
axis=1, numeric_only=False))
pytest.raises(TypeError, lambda: getattr(df2, meth)(
axis=1, numeric_only=False))
def test_mixed_ops(self):
# GH 16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
for op in ['mean', 'std', 'var', 'skew',
'kurt', 'sem']:
result = getattr(df, op)()
assert len(result) == 2
if nanops._USE_BOTTLENECK:
nanops._USE_BOTTLENECK = False
result = getattr(df, op)()
assert len(result) == 2
nanops._USE_BOTTLENECK = True
def test_cumsum(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cumsum = self.tsframe.cumsum()
expected = self.tsframe.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = self.tsframe.cumsum(axis=1)
expected = self.tsframe.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = self.tsframe.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(self.tsframe)
def test_cumprod(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cumprod = self.tsframe.cumprod()
expected = self.tsframe.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = self.tsframe.cumprod(axis=1)
expected = self.tsframe.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = self.tsframe.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(self.tsframe)
# ints
df = self.tsframe.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = self.tsframe.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_sem(self):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.tsframe.sem(ddof=4)
expected = self.tsframe.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
if nanops._USE_BOTTLENECK:
nanops._USE_BOTTLENECK = False
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
nanops._USE_BOTTLENECK = True
def test_skew(self):
tm._skip_if_no_scipy()
from scipy.stats import skew
def alt(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', alt)
def test_kurt(self):
tm._skip_if_no_scipy()
from scipy.stats import kurtosis
def alt(x):
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
has_numeric_only=False, check_dtype=True,
check_dates=False, check_less_precise=False):
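        # Shared helper for the reduction tests above: runs the DataFrame
        # method `name` along both axes (with and without skipna when
        # supported), compares it against applying `alternative` per
        # column/row, and also exercises dtype preservation, the
        # numeric_only flag, the bad-axis error and the all-NA corner case.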
if frame is None:
frame = self.frame
# set some NAs
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
f = getattr(frame, name)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
_f = getattr(df, name)
result = _f()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, name)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if not tm._incompat_bottleneck_version(name):
exp = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, exp, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
# make sure works on mixed-type frame
getattr(self.mixed_frame, name)(axis=0)
getattr(self.mixed_frame, name)(axis=1)
if has_numeric_only:
getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
getattr(self.frame, name)(axis=0, numeric_only=False)
getattr(self.frame, name)(axis=1, numeric_only=False)
# all NA case
if has_skipna:
all_na = self.frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if not tm._incompat_bottleneck_version(name):
assert np.isnan(r0).all()
assert np.isnan(r1).all()
def test_mode(self):
df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
"B": [10, 10, 10, np.nan, 3, 4],
"C": [8, 8, 8, 9, 9, 9],
"D": np.arange(6, dtype='int64'),
"E": [8, 8, 1, 1, 3, 3]})
tm.assert_frame_equal(df[["A"]].mode(),
pd.DataFrame({"A": [12]}))
expected = pd.Series([0, 1, 2, 3, 4, 5], dtype='int64', name='D').\
to_frame()
tm.assert_frame_equal(df[["D"]].mode(), expected)
expected = pd.Series([1, 3, 8], dtype='int64', name='E').to_frame()
tm.assert_frame_equal(df[["E"]].mode(), expected)
tm.assert_frame_equal(df[["A", "B"]].mode(),
pd.DataFrame({"A": [12], "B": [10.]}))
tm.assert_frame_equal(df.mode(),
pd.DataFrame({"A": [12, np.nan, np.nan, np.nan,
np.nan, np.nan],
"B": [10, np.nan, np.nan, np.nan,
np.nan, np.nan],
"C": [8, 9, np.nan, np.nan, np.nan,
np.nan],
"D": [0, 1, 2, 3, 4, 5],
"E": [1, 3, 8, np.nan, np.nan,
np.nan]}))
# outputs in sorted order
df["C"] = list(reversed(df["C"]))
printing.pprint_thing(df["C"])
printing.pprint_thing(df["C"].mode())
a, b = (df[["A", "B", "C"]].mode(),
pd.DataFrame({"A": [12, np.nan],
"B": [10, np.nan],
"C": [8, 9]}))
printing.pprint_thing(a)
printing.pprint_thing(b)
tm.assert_frame_equal(a, b)
# should work with heterogeneous types
df = pd.DataFrame({"A": np.arange(6, dtype='int64'),
"B": pd.date_range('2011', periods=6),
"C": list('abcdef')})
exp = pd.DataFrame({"A": pd.Series(np.arange(6, dtype='int64'),
dtype=df["A"].dtype),
"B": pd.Series(pd.date_range('2011', periods=6),
dtype=df["B"].dtype),
"C": pd.Series(list('abcdef'),
dtype=df["C"].dtype)})
tm.assert_frame_equal(df.mode(), exp)
def test_operators_timedelta64(self):
from datetime import timedelta
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
from pandas.core.tools.timedeltas import (
_coerce_scalar_to_timedelta_type as _coerce)
result = mixed.min()
expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),
_coerce(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
axis0 = self.empty.sum(0)
axis1 = self.empty.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
def test_sum_object(self):
values = self.frame.values.astype(int)
frame = DataFrame(values, index=self.frame.index,
columns=self.frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self):
# ensure this works, bug report
bools = np.isnan(self.frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self):
# unit test when have object data
the_mean = self.mixed_frame.mean(axis=0)
the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(self.mixed_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = self.mixed_frame.mean(axis=1)
the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
self.frame['bool'] = self.frame['A'] > 0
means = self.frame.mean(0)
assert means['bool'] == self.frame['bool'].values.mean()
def test_stats_mixed_type(self):
# don't blow up
self.mixed_frame.std(1)
self.mixed_frame.var(1)
self.mixed_frame.mean(1)
self.mixed_frame.skew(1)
def test_median_corner(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, frame=self.intframe,
check_dtype=False, check_dates=True)
# Miscellanea
def test_count_objects(self):
dm = DataFrame(self.mixed_frame._series)
df = DataFrame(self.mixed_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isnull(df)
assert bools.sum(axis=1)[0] == 10
# Index of max / min
def test_idxmin(self):
frame = self.frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
pytest.raises(ValueError, frame.idxmin, axis=2)
def test_idxmax(self):
frame = self.frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
pytest.raises(ValueError, frame.idxmax, axis=2)
# ----------------------------------------------------------------------
# Logical reductions
def test_any_all(self):
self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)
df = DataFrame(randn(10, 4)) > 0
df.any(1)
df.all(1)
df.any(1, bool_only=True)
df.all(1, bool_only=True)
# skip pathological failure cases
# class CantNonzero(object):
# def __nonzero__(self):
# raise ValueError
# df[4] = CantNonzero()
# it works!
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
# df[4][4] = np.nan
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
has_bool_only=False):
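        # Shared helper for the logical reductions ('any'/'all'): mirrors
        # _check_stat_op, additionally covering the bool_only flag and a
        # column whose __nonzero__ raises.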
if frame is None:
frame = self.frame > 0
# set some NAs
frame = DataFrame(frame.values.astype(object), frame.index,
frame.columns)
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
f = getattr(frame, name)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
pytest.raises(ValueError, f, axis=2)
# make sure works on mixed-type frame
mixed = self.mixed_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0
getattr(mixed, name)(axis=0)
getattr(mixed, name)(axis=1)
class NonzeroFail:
def __nonzero__(self):
raise ValueError
mixed['_nonzero_fail_'] = NonzeroFail()
if has_bool_only:
getattr(mixed, name)(axis=0, bool_only=True)
getattr(mixed, name)(axis=1, bool_only=True)
getattr(frame, name)(axis=0, bool_only=False)
getattr(frame, name)(axis=1, bool_only=False)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if name == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH #4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
def test_isin_empty(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
result = df.isin([])
expected = pd.DataFrame(False, df.index, df.columns)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
tm.assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
# GH 15473
df1_ts = DataFrame({'date':
pd.to_datetime(['2014-01-01', '2014-01-02'])})
df1_td = DataFrame({'date':
[pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
df2 = DataFrame({'date': []})
df3 = DataFrame()
expected = DataFrame({'date': [False, False]})
result = df1_ts.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_ts.isin(df3)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df3)
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Row deduplication
def test_drop_duplicates(self):
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.loc[[6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.loc[[]]
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates(np.array(['AAA', 'B']))
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'])
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep='last')
expected = df.loc[[0, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep=False)
expected = df.loc[[0]]
tm.assert_frame_equal(result, expected)
# consider everything
df2 = df.loc[:, ['AAA', 'B', 'C']]
result = df2.drop_duplicates()
# in this case only
expected = df2.drop_duplicates(['AAA', 'B'])
tm.assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep='last')
expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
tm.assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep=False)
expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
tm.assert_frame_equal(result, expected)
# integers
result = df.drop_duplicates('C')
expected = df.iloc[[0, 2]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[-2, -1]]
tm.assert_frame_equal(result, expected)
df['E'] = df['C'].astype('int8')
result = df.drop_duplicates('E')
expected = df.iloc[[0, 2]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('E', keep='last')
expected = df.iloc[[-2, -1]]
tm.assert_frame_equal(result, expected)
# GH 11376
df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
'y': [0, 6, 5, 5, 9, 1, 2]})
expected = df.loc[df.index != 3]
tm.assert_frame_equal(df.drop_duplicates(), expected)
df = pd.DataFrame([[1, 0], [0, 2]])
tm.assert_frame_equal(df.drop_duplicates(), df)
df = pd.DataFrame([[-2, 0], [0, -4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
x = np.iinfo(np.int64).max / 3 * 2
df = pd.DataFrame([[-x, x], [0, x + 4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
df = pd.DataFrame([[-x, x], [x, x + 4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
# GH 11864
df = pd.DataFrame([i] * 9 for i in range(16))
df = df.append([[1] + [0] * 8], ignore_index=True)
for keep in ['first', 'last', False]:
assert df.duplicated(keep=keep).sum() == 0
def test_drop_duplicates_for_take_all(self):
df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
'foo', 'bar', 'qux', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df.iloc[[0, 1, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.iloc[[2, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.iloc[[2, 6]]
tm.assert_frame_equal(result, expected)
# multiple columns
result = df.drop_duplicates(['AAA', 'B'])
expected = df.iloc[[0, 1, 2, 3, 4, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep='last')
expected = df.iloc[[0, 1, 2, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep=False)
expected = df.iloc[[0, 1, 2, 6]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_tuple(self):
df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates(('AA', 'AB'))
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep='last')
expected = df.loc[[6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep=False)
expected = df.loc[[]] # empty df
assert len(result) == 0
tm.assert_frame_equal(result, expected)
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates((('AA', 'AB'), 'B'))
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_NA(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('A')
expected = df.loc[[0, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.loc[[1, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.loc[[]] # empty df
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
result = df.drop_duplicates(['A', 'B'])
expected = df.loc[[0, 2, 3, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep='last')
expected = df.loc[[1, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep=False)
expected = df.loc[[6]]
tm.assert_frame_equal(result, expected)
# nan
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('C')
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.loc[[3, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.loc[[]] # empty df
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
result = df.drop_duplicates(['C', 'B'])
expected = df.loc[[0, 1, 2, 4]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep='last')
expected = df.loc[[1, 3, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep=False)
expected = df.loc[[1]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'baz', 'bar', 'qux'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})
# single column
result = df.drop_duplicates('A')
expected = df.iloc[[0, 2, 3, 5, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.iloc[[1, 4, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.iloc[[5, 7]]
tm.assert_frame_equal(result, expected)
# nan
# single column
result = df.drop_duplicates('C')
expected = df.iloc[[0, 1, 5, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[3, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.iloc[[5, 6]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_inplace(self):
orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
df = orig.copy()
df.drop_duplicates('A', inplace=True)
expected = orig[:2]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep='last', inplace=True)
expected = orig.loc[[6, 7]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep=False, inplace=True)
expected = orig.loc[[]]
result = df
tm.assert_frame_equal(result, expected)
assert len(df) == 0
# multi column
df = orig.copy()
df.drop_duplicates(['A', 'B'], inplace=True)
expected = orig.loc[[0, 1, 2, 3]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
expected = orig.loc[[0, 5, 6, 7]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
expected = orig.loc[[0]]
result = df
tm.assert_frame_equal(result, expected)
# consider everything
orig2 = orig.loc[:, ['A', 'B', 'C']].copy()
df2 = orig2.copy()
df2.drop_duplicates(inplace=True)
# in this case only
expected = orig2.drop_duplicates(['A', 'B'])
result = df2
tm.assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep='last', inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep='last')
result = df2
tm.assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep=False, inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep=False)
result = df2
tm.assert_frame_equal(result, expected)
# Rounding
def test_round(self):
# GH 2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame({'col1': [1.123, 2.123, 3.123],
'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(df.round(), expected_rounded)
# Round with an integer
decimals = 2
expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
'col2': [1.23, 2.23, 3.23]})
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# This should also work with np.round (since np.round dispatches to
# df.round)
tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
# Round with a list
round_list = [1, 2]
with pytest.raises(TypeError):
df.round(round_list)
# Round with a dictionary
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
tm.assert_frame_equal(df.round(round_dict), expected_rounded)
# Incomplete dict
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
tm.assert_frame_equal(df.round(partial_round_dict),
expected_partially_rounded)
# Dict with unknown elements
wrong_round_dict = {'col3': 2, 'col2': 1}
tm.assert_frame_equal(df.round(wrong_round_dict),
expected_partially_rounded)
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# List input
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Non integer Series inputs
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Negative numbers
negative_round_dict = {'col1': -1, 'col2': -2}
big_df = df * 100
expected_neg_rounded = DataFrame(
{'col1': [110., 210, 310], 'col2': [100., 200, 300]})
tm.assert_frame_equal(big_df.round(negative_round_dict),
expected_neg_rounded)
# nan in Series round
nan_round_Series = Series({'col1': nan, 'col2': 1})
# TODO(wesm): unused?
expected_nan_round = DataFrame({ # noqa
'col1': [1.123, 2.123, 3.123],
'col2': [1.2, 2.2, 3.2]})
        if LooseVersion(sys.version) < LooseVersion('2.7'):
# Rounding with decimal is a ValueError in Python < 2.7
with pytest.raises(ValueError):
df.round(nan_round_Series)
else:
with pytest.raises(TypeError):
df.round(nan_round_Series)
# Make sure this doesn't break existing Series.round
tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
# named columns
# GH 11986
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
df.columns.name = "cols"
expected_rounded.columns.name = "cols"
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# interaction of named columns & series
tm.assert_series_equal(df['col1'].round(decimals),
expected_rounded['col1'])
tm.assert_series_equal(df.round(decimals)['col1'],
expected_rounded['col1'])
def test_numpy_round(self):
# See gh-12600
df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
out = np.round(df, decimals=0)
expected = DataFrame([[2., 1.], [0., 7.]])
tm.assert_frame_equal(out, expected)
msg = "the 'out' parameter is not supported"
with tm.assert_raises_regex(ValueError, msg):
np.round(df, decimals=0, out=df)
def test_round_mixed_type(self):
# GH11885
df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
round_0 = DataFrame({'col1': [1., 2., 3., 4.],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
tm.assert_frame_equal(df.round(), round_0)
tm.assert_frame_equal(df.round(1), df)
tm.assert_frame_equal(df.round({'col1': 1}), df)
tm.assert_frame_equal(df.round({'col1': 0}), round_0)
tm.assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
tm.assert_frame_equal(df.round({'col3': 1}), df)
def test_round_issue(self):
# GH11611
df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
index=['first', 'second', 'third'])
dfs = pd.concat((df, df), axis=1)
rounded = dfs.round()
tm.assert_index_equal(rounded.index, dfs.index)
decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
pytest.raises(ValueError, df.round, decimals)
def test_built_in_round(self):
if not compat.PY3:
pytest.skip("build in round cannot be overriden "
"prior to Python 3")
# GH11763
# Here's the test frame we'll be working with
df = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(round(df), expected_rounded)
# Clip
def test_clip(self):
median = self.frame.median().median()
capped = self.frame.clip_upper(median)
assert not (capped.values > median).any()
floored = self.frame.clip_lower(median)
assert not (floored.values < median).any()
double = self.frame.clip(upper=median, lower=median)
assert not (double.values != median).any()
def test_dataframe_clip(self):
# GH #2747
df = DataFrame(np.random.randn(1000, 2))
for lb, ub in [(-1, 1), (1, -1)]:
clipped_df = df.clip(lb, ub)
lb, ub = min(lb, ub), max(ub, lb)
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
assert (clipped_df.values[lb_mask] == lb).all()
assert (clipped_df.values[ub_mask] == ub).all()
assert (clipped_df.values[mask] == df.values[mask]).all()
@pytest.mark.xfail(reason=("clip on mixed integer or floats "
"with integer clippers coerces to float"))
def test_clip_mixed_numeric(self):
df = DataFrame({'A': [1, 2, 3],
'B': [1., np.nan, 3.]})
result = df.clip(1, 2)
expected = DataFrame({'A': [1, 2, 2],
'B': [1., np.nan, 2.]})
tm.assert_frame_equal(result, expected, check_like=True)
def test_clip_against_series(self):
# GH #6966
df = DataFrame(np.random.randn(1000, 2))
lb = Series(np.random.randn(1000))
ub = lb + 1
clipped_df = df.clip(lb, ub, axis=0)
for i in range(2):
lb_mask = df.iloc[:, i] <= lb
ub_mask = df.iloc[:, i] >= ub
mask = ~lb_mask & ~ub_mask
result = clipped_df.loc[lb_mask, i]
tm.assert_series_equal(result, lb[lb_mask], check_names=False)
assert result.name == i
result = clipped_df.loc[ub_mask, i]
tm.assert_series_equal(result, ub[ub_mask], check_names=False)
assert result.name == i
tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
def test_clip_against_frame(self):
df = DataFrame(np.random.randn(1000, 2))
lb = DataFrame(np.random.randn(1000, 2))
ub = lb + 1
clipped_df = df.clip(lb, ub)
lb_mask = df <= lb
ub_mask = df >= ub
mask = ~lb_mask & ~ub_mask
tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
tm.assert_frame_equal(clipped_df[mask], df[mask])
# Matrix-like
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
        result = a.dot(b1)
tm.assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
result = a.dot(b1['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
# can pass correct-length arrays
row = a.iloc[0].values
result = a.dot(row)
exp = a.dot(a.iloc[0])
tm.assert_series_equal(result, exp)
with tm.assert_raises_regex(ValueError,
'Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
# TODO(wesm): unused
B = DataFrame(b) # noqa
# it works
result = A.dot(b)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
with tm.assert_raises_regex(ValueError, 'aligned'):
df.dot(df2)
@pytest.fixture
def df_duplicates():
return pd.DataFrame({'a': [1, 2, 3, 4, 4],
'b': [1, 1, 1, 1, 1],
'c': [0, 1, 2, 5, 4]},
index=[0, 0, 1, 1, 1])
@pytest.fixture
def df_strings():
return pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
@pytest.fixture
def df_main_dtypes():
return pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
class TestNLargestNSmallest(object):
dtype_error_msg_template = ("Column {column!r} has dtype {dtype}, cannot "
"use method {method!r} with this dtype")
# ----------------------------------------------------------------------
# Top / bottom
@pytest.mark.parametrize(
'method, n, order',
product(['nsmallest', 'nlargest'], range(1, 11),
[['a'],
['c'],
['a', 'b'],
['a', 'c'],
['b', 'a'],
['b', 'c'],
['a', 'b', 'c'],
['c', 'a', 'b'],
['c', 'b', 'a'],
['b', 'c', 'a'],
['b', 'a', 'c'],
# dups!
['b', 'c', 'c'],
]))
def test_n(self, df_strings, method, n, order):
# GH10393
df = df_strings
if 'b' in order:
error_msg = self.dtype_error_msg_template.format(
column='b', method=method, dtype='object')
with tm.assert_raises_regex(TypeError, error_msg):
getattr(df, method)(n, order)
else:
ascending = method == 'nsmallest'
result = getattr(df, method)(n, order)
expected = df.sort_values(order, ascending=ascending).head(n)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'method, columns',
product(['nsmallest', 'nlargest'],
product(['group'], ['category_string', 'string'])
))
def test_n_error(self, df_main_dtypes, method, columns):
df = df_main_dtypes
error_msg = self.dtype_error_msg_template.format(
column=columns[1], method=method, dtype=df[columns[1]].dtype)
with tm.assert_raises_regex(TypeError, error_msg):
getattr(df, method)(2, columns)
def test_n_all_dtypes(self, df_main_dtypes):
df = df_main_dtypes
df.nsmallest(2, list(set(df) - {'category_string', 'string'}))
df.nlargest(2, list(set(df) - {'category_string', 'string'}))
def test_n_identical_values(self):
# GH15297
df = pd.DataFrame({'a': [1] * 5, 'b': [1, 2, 3, 4, 5]})
result = df.nlargest(3, 'a')
expected = pd.DataFrame(
{'a': [1] * 3, 'b': [1, 2, 3]}, index=[0, 1, 2]
)
tm.assert_frame_equal(result, expected)
result = df.nsmallest(3, 'a')
expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'n, order',
product([1, 2, 3, 4, 5],
[['a', 'b', 'c'],
['c', 'b', 'a'],
['a'],
['b'],
['a', 'b'],
['c', 'b']]))
def test_n_duplicate_index(self, df_duplicates, n, order):
# GH 13412
df = df_duplicates
result = df.nsmallest(n, order)
expected = df.sort_values(order).head(n)
tm.assert_frame_equal(result, expected)
result = df.nlargest(n, order)
expected = df.sort_values(order, ascending=False).head(n)
tm.assert_frame_equal(result, expected)
def test_series_broadcasting(self):
# smoke test for numpy warnings
# GH 16378, GH 16306
df = DataFrame([1.0, 1.0, 1.0])
df_nan = DataFrame({'A': [np.nan, 2.0, np.nan]})
s = Series([1, 1, 1])
s_nan = Series([np.nan, np.nan, 1])
with tm.assert_produces_warning(None):
df_nan.clip_lower(s, axis=0)
for op in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
getattr(df, op)(s_nan, axis=0)
| mit |
kevin-intel/scikit-learn | examples/linear_model/plot_iris_logistic.py | 14 | 1760 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the first two dimensions (sepal length and width) of the `iris
<https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The datapoints
are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
# Create an instance of Logistic Regression Classifier and fit the data.
logreg = LogisticRegression(C=1e5)
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = .02 # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
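# Illustrative addition, not part of the original example: the fitted
# classifier can be queried for new measurements.  The sample below
# (sepal length 5.0 cm, sepal width 3.5 cm) is an arbitrary point.
print(logreg.predict([[5.0, 3.5]]))        # predicted class label
print(logreg.predict_proba([[5.0, 3.5]]))  # per-class probabilities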
| bsd-3-clause |
JT5D/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
phaller0513/aima-python | submissions/Haller/myNN.py | 3 | 5247 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
The neural-network model used here is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
def trumpTarget(percentage):
if percentage > 45:
return 1
return 0
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
# hidden_layer_sizes = (100,),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
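'''
Illustrative sketch only (commented out so nothing extra runs when this
module is imported): either frame can be fit directly, e.g. with an 80/20
split. The split fraction and random_state are arbitrary choices.
'''
# from sklearn.model_selection import train_test_split
# XTrain, XTest, yTrain, yTest = train_test_split(
#     trumpECHP.data, trumpECHP.target, test_size=0.2, random_state=0)
# mlpc.fit(XTrain, yTrain)
# print(mlpc.score(XTest, yTest))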
'''
Try scaling the data.
'''
trumpScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(trumpECHP.data)
trumpScaled.data = scaleGrid(trumpECHP.data)
trumpScaled.feature_names = trumpECHP.feature_names
trumpScaled.target = trumpECHP.target
trumpScaled.target_names = trumpECHP.target_names
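'''
The hand-rolled scaling above is essentially min-max scaling; an equivalent
sketch (assuming scikit-learn's preprocessing module; columns with a single
constant value are handled slightly differently there) would be:
'''
# from sklearn.preprocessing import MinMaxScaler
# trumpScaled.data = MinMaxScaler().fit_transform(trumpECHP.data).tolist()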
Examples = {
'TrumpDefault': {
'frame': trumpECHP,
},
'TrumpSGD': {
'frame': trumpECHP,
'mlpc': mlpc
},
'TrumpScaled': {
'frame': trumpScaled,
},
} | mit |
MatthieuBizien/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 104 | 3778 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])
# First run
plt.figure(0)
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Second run
plt.figure(1)
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Plot LML landscape
plt.figure(2)
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
vmin, vmax = (-LML).min(), (-LML).max()
vmax = 50
plt.contour(Theta0, Theta1, -LML,
levels=np.logspace(np.log10(vmin), np.log10(vmax), 50),
norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
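# Illustrative addition, not part of the original example: the note above about
# converging to the high-noise local maximum can be addressed by restarting the
# hyperparameter optimization from several random initializations.
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
    + WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp_restarts = GaussianProcessRegressor(kernel=kernel, alpha=0.0,
                                       n_restarts_optimizer=9).fit(X, y)
print("Optimum with restarts: %s" % gp_restarts.kernel_)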
| bsd-3-clause |
kjung/scikit-learn | examples/gaussian_process/plot_compare_gpr_krr.py | 67 | 5191 | """
==========================================================
Comparison of kernel ridge and Gaussian process regression
==========================================================
Both kernel ridge regression (KRR) and Gaussian process regression (GPR) learn
a target function by employing internally the "kernel trick". KRR learns a
linear function in the space induced by the respective kernel which corresponds
to a non-linear function in the original space. The linear function in the
kernel space is chosen based on the mean-squared error loss with
ridge regularization. GPR uses the kernel to define the covariance of
a prior distribution over the target functions and uses the observed training
data to define a likelihood function. Based on Bayes' theorem, a (Gaussian)
posterior distribution over target functions is defined, whose mean is used
for prediction.
A major difference is that GPR can choose the kernel's hyperparameters based
on gradient-ascent on the marginal likelihood function while KRR needs to
perform a grid search on a cross-validated loss function (mean-squared error
loss). A further difference is that GPR learns a generative, probabilistic
model of the target function and can thus provide meaningful confidence
intervals and posterior samples along with the predictions while KRR only
provides predictions.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise. The figure compares
the learned model of KRR and GPR based on an ExpSineSquared kernel, which is
suited for learning periodic functions. The kernel's hyperparameters control
the smoothness (l) and periodicity of the kernel (p). Moreover, the noise level
of the data is learned explicitly by GPR by an additional WhiteKernel component
in the kernel and by the regularization parameter alpha of KRR.
The figure shows that both methods learn reasonable models of the target
function. GPR correctly identifies the periodicity of the function to be
roughly 2*pi (6.28), while KRR chooses the doubled periodicity 4*pi. Besides
that, GPR provides reasonable confidence bounds on the prediction which are not
available for KRR. A major difference between the two methods is the time
required for fitting and predicting: while fitting KRR is fast in principle,
the grid-search for hyperparameter optimization scales exponentially with the
number of hyperparameters ("curse of dimensionality"). The gradient-based
optimization of the parameters in GPR does not suffer from this exponential
scaling and is thus considerably faster on this example with its 3-dimensional
hyperparameter space. The time for predicting is similar; however, generating
the variance of the predictive distribution of GPR takes considerably longer
than just predicting the mean.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared
rng = np.random.RandomState(0)
# Generate sample data
X = 15 * rng.rand(100, 1)
y = np.sin(X).ravel()
y += 3 * (0.5 - rng.rand(X.shape[0])) # add noise
# Fit KernelRidge with parameter selection based on 5-fold cross validation
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [ExpSineSquared(l, p)
for l in np.logspace(-2, 2, 10)
for p in np.logspace(0, 2, 10)]}
kr = GridSearchCV(KernelRidge(), cv=5, param_grid=param_grid)
stime = time.time()
kr.fit(X, y)
print("Time for KRR fitting: %.3f" % (time.time() - stime))
gp_kernel = ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) \
+ WhiteKernel(1e-1)
gpr = GaussianProcessRegressor(kernel=gp_kernel)
stime = time.time()
gpr.fit(X, y)
print("Time for GPR fitting: %.3f" % (time.time() - stime))
# Predict using kernel ridge
X_plot = np.linspace(0, 20, 10000)[:, None]
stime = time.time()
y_kr = kr.predict(X_plot)
print("Time for KRR prediction: %.3f" % (time.time() - stime))
# Predict using gaussian process regressor
stime = time.time()
y_gpr = gpr.predict(X_plot, return_std=False)
print("Time for GPR prediction: %.3f" % (time.time() - stime))
stime = time.time()
y_gpr, y_std = gpr.predict(X_plot, return_std=True)
print("Time for GPR prediction with standard-deviation: %.3f"
% (time.time() - stime))
# Plot results
plt.figure(figsize=(10, 5))
lw = 2
plt.scatter(X, y, c='k', label='data')
plt.plot(X_plot, np.sin(X_plot), color='navy', lw=lw, label='True')
plt.plot(X_plot, y_kr, color='turquoise', lw=lw,
label='KRR (%s)' % kr.best_params_)
plt.plot(X_plot, y_gpr, color='darkorange', lw=lw,
label='GPR (%s)' % gpr.kernel_)
plt.fill_between(X_plot[:, 0], y_gpr - y_std, y_gpr + y_std, color='darkorange',
alpha=0.2)
plt.xlabel('data')
plt.ylabel('target')
plt.xlim(0, 20)
plt.ylim(-4, 4)
plt.title('GPR versus Kernel Ridge')
plt.legend(loc="best", scatterpoints=1, prop={'size': 8})
plt.show()
| bsd-3-clause |
ssaeger/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al. 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
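# Each setting below modifies the baseline: shrinkage (learning_rate < 1.0),
# stochastic gradient boosting (subsample < 1.0), and feature subsampling
# (max_features), i.e. the strategies discussed in the docstring.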
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
shusenl/scikit-learn | sklearn/externals/joblib/__init__.py | 72 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is often rerun over and
  over, for instance when prototyping computation-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
  good for resuming an application's status or a computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
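   A minimal sketch (illustrative only; ``big_array`` stands for any
   picklable Python object)::
     from sklearn.externals.joblib import dump, load
     dump(big_array, '/tmp/big_array.pkl')  # writes one or more files
     big_array = load('/tmp/big_array.pkl')  # reads the object back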
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
shakamunyi/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 29 | 5677 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
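# FILTER_SHAPE1 spans the full embedding width, so with VALID padding the first
# convolution is effectively 1-D over word positions; FILTER_SHAPE2 likewise
# spans the N_FILTERS channels after the transpose inside cnn_model.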
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def cnn_model(features, labels, mode):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
word_vectors,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
        # Add a ReLU for non-linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = tf.estimator.Estimator(model_fn=cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
strawlab/pyopy | setup.py | 1 | 1735 | #!/usr/bin/env python2
# coding=utf-8
# Authors: Santi Villalba <[email protected]>
# Licence: BSD 3 clause
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='pyopy',
license='BSD 3 clause',
description='PYthon->Octave->PYthon: Tools to pythonize matlab/octave libraries',
version='0.1.1-dev',
url='https://github.com/strawlab/pyopy',
author='Santi Villalba',
author_email='[email protected]',
packages=['pyopy',
'pyopy.minioct2py',
'pyopy.externals',
'pyopy.externals.ompc',
'pyopy.tests',
'pyopy.hctsa',
'pyopy.hctsa.tests'],
entry_points={
'console_scripts': [
'hctsa-cli = pyopy.hctsa.hctsa_cli:main',
]
},
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
],
requires=['numpy',
'scipy',
'pandas',
'joblib',
'argh',
'whatami',
'lockfile'],
extras_require={
'oct2py': ['oct2py>=3.1.0'],
'pymatbridge': ['pymatbridge>=0.4.3'],
'matlab_wrapper': ['matlab_wrapper>=0.9.6'],
'mathworks': [], # matlab python engine (http://www.mathworks.com/help/matlab/matlab-engine-for-python.html)
},
tests_require=['pytest'],
)
| bsd-3-clause |
barbagroup/cuIBM | external/snake-0.3/snake/barbaGroupSimulation.py | 2 | 26711 | """
Implementation of the class `BarbaGroupSimulation`, a container for the
numerical solution from BarbaGroup's software (cuIBM and PetIBM).
"""
import os
import numpy
from .simulation import Simulation
from .field import Field
class BarbaGroupSimulation(Simulation):
"""
Contains info about a BarbaGroup simulation.
Inherits from the class `Simulation`.
"""
def __init__(self, software,
description=None,
directory=os.getcwd(),
**kwargs):
"""
Initializes object by calling parent constructor.
Parameters
----------
software: string
Software used;
choices: 'cuibm', 'petibm'.
description: string, optional
Description of the simulation;
default: None.
directory: string, optional
Directory of the simulation;
default: present working directory.
"""
super(BarbaGroupSimulation, self).__init__(software,
description=description,
directory=directory,
**kwargs)
def create_uniform_grid(self,
bottom_left=[0.0, 0.0],
top_right=[1.0, 1.0],
n_cells=[100, 100]):
"""
Creates a uniform 2D structured Cartesian grid.
Parameters
----------
bottom_left: list of floats, optional
Coordinates of the bottom-left corner;
default: [0.0, 0.0].
top_right: list of floats, optional
Coordinates of the top-right corner;
default: [1.0, 1.0].
n_cells: list of integers, optional
Number of cells in each direction;
default: [100, 100].
"""
print('[info] creating a uniform 2D Cartesian grid ...'),
assert len(bottom_left) == len(n_cells)
assert len(top_right) == len(n_cells)
self.grid = []
for i, n in enumerate(n_cells):
self.grid.append(numpy.linspace(bottom_left[i], top_right[i], n + 1,
dtype=numpy.float64))
print('done')
def get_time_steps(self, time_steps_range=None, directory=None):
"""
Returns a list of the time-steps to post-process.
If the range is not provided, the method lists the time-step folders
present in the directory (either provided or taken as the simulation
directory).
Parameters
----------
time_steps_range: 3-list of integers, optional
Initial, final and stride of the time-steps to consider;
default: None (all saved time-steps).
directory: string, optional
Directory containing the saved time-step folders;
default: None (will use the simulation directory).
"""
if time_steps_range:
return range(time_steps_range[0],
time_steps_range[1] + 1,
time_steps_range[2])
else:
if not directory:
directory = self.directory
return sorted(int(folder) for folder in os.listdir(directory)
if folder[0] == '0')
def get_grid_spacing(self):
"""
Returns the grid-spacing of a uniform grid.
"""
return (self.grid[0][-1] - self.grid[0][0]) / (self.grid[0].size - 1)
def read_fields(self, field_names, time_step,
periodic_directions=[],
directory=None):
"""
Gets the fields at a given time-step.
Parameters
----------
field_names: list of strings or single string
Name of the fields to get;
choices: 'pressure', 'vorticity',
'x-velocity', 'y-velocity',
'x-flux', 'y-flux'.
time_step: integer
Time-step at which the solution is read.
periodic_directions: list of strings, optional
      Directions that use periodic boundary conditions;
choices: 'x', 'y', 'z';
default: [].
directory: string, optional
Directory containing the numerical solution at given time-step;
default: None (will use <simulation-directory>/<time-step>).
"""
# convert field_names in list if single string provided
if not isinstance(field_names, (list, tuple)):
field_names = [field_names]
if not directory:
directory = os.path.join(self.directory, '{:0>7}'.format(time_step))
if 'pressure' in field_names:
self.fields['pressure'] = self.read_pressure(time_step,
directory=directory)
if any(name in ['x-flux', 'y-flux'] for name in field_names):
fluxes = self.read_fluxes(time_step,
periodic_directions=periodic_directions,
directory=directory)
self.fields['x-flux'], self.fields['y-flux'] = fluxes
if any(name in ['x-velocity', 'y-velocity'] for name in field_names):
velocities = self.get_velocity(time_step,
periodic_directions=periodic_directions,
directory=directory)
self.fields['x-velocity'], self.fields['y-velocity'] = velocities
if 'vorticity' in field_names:
velocities = self.get_velocity(time_step,
periodic_directions=periodic_directions,
directory=directory)
self.fields['x-velocity'], self.fields['y-velocity'] = velocities
self.fields['vorticity'] = self.compute_vorticity()
def compute_vorticity(self):
"""
Computes the vorticity field for a two-dimensional simulation.
Returns
-------
vorticity: Field object
The vorticity field.
"""
time_step = self.fields['x-velocity'].time_step
print('[time-step {}] computing the vorticity field ...'.format(time_step))
u, v = self.fields['x-velocity'], self.fields['y-velocity']
mask_x = numpy.where(numpy.logical_and(u.x > v.x[0], u.x < v.x[-1]))[0]
mask_y = numpy.where(numpy.logical_and(v.y > u.y[0], v.y < u.y[-1]))[0]
# vorticity nodes at cell vertices intersection
xw, yw = 0.5 * (v.x[:-1] + v.x[1:]), 0.5 * (u.y[:-1] + u.y[1:])
# compute vorticity
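    # w = dv/dx - du/dy, built from differences of adjacent velocity values
    # around each vorticity node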
w = ((v.values[mask_y, 1:] - v.values[mask_y, :-1])
/ numpy.outer(numpy.ones(yw.size), v.x[1:] - v.x[:-1])
- (u.values[1:, mask_x] - u.values[:-1, mask_x])
/ numpy.outer(u.y[1:] - u.y[:-1], numpy.ones(xw.size)))
return Field(label='vorticity',
time_step=time_step,
x=xw, y=yw,
values=w)
def get_velocity(self, time_step,
periodic_directions=[],
directory=None):
"""
Gets the velocity fields at a given time-step.
    We first read the fluxes from file, then convert them into velocity components.
Parameters
----------
time_step: integer
Time-step at which the fluxes are read from file(s).
periodic_directions: list of strings, optional
Directions that uses periodic boundary conditions;
choices: 'x', 'y', 'z',
default: [].
    directory: string, optional
Directory containing the saved time-step folders;
default: None.
Returns
-------
ux, uy, uz: Field objects
Velocity in the x-, y-, and z-directions.
"""
print('[time-step {}] get velocity fields ...'.format(time_step))
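    # each velocity component is the flux divided by the area of its cell face:
    # dy*dz, dx*dz, dx*dy in 3D; dy or dx in 2D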
fluxes = self.read_fluxes(time_step,
periodic_directions=periodic_directions,
directory=directory)
dim3 = (len(self.grid) == 3)
# get stations, cell-widths, and number of cells in x- and y-directions
x, y = self.grid[:2]
dx, dy = x[1:] - x[:-1], y[1:] - y[:-1]
if dim3:
# get stations, cell-widths, and number of cells in z-direction
z = self.grid[2]
dz = z[1:] - z[:-1]
if dim3:
ux = numpy.empty_like(fluxes[0].values, dtype=numpy.float64)
for k in range(fluxes[0].shape[0]):
for j in range(fluxes[0].shape[1]):
for i in range(fluxes[0].shape[2]):
ux[k, j, i] = fluxes[0].values[k, j, i] / dy[j] / dz[k]
ux = Field(label='x-velocity',
time_step=time_step,
x=fluxes[0].x, y=fluxes[0].y, z=fluxes[0].z,
values=ux)
uy = numpy.empty_like(fluxes[1].values, dtype=numpy.float64)
for k in range(fluxes[1].shape[0]):
for j in range(fluxes[1].shape[1]):
for i in range(fluxes[1].shape[2]):
uy[k, j, i] = fluxes[1].values[k, j, i] / dx[i] / dz[k]
uy = Field(label='y-velocity',
time_step=time_step,
x=fluxes[1].x, y=fluxes[1].y, z=fluxes[1].z,
values=uy)
uz = numpy.empty_like(fluxes[2].values, dtype=numpy.float64)
for k in range(fluxes[2].shape[0]):
for j in range(fluxes[2].shape[1]):
for i in range(fluxes[2].shape[2]):
uz[k, j, i] = fluxes[2].values[k, j, i] / dx[i] / dy[j]
uz = Field(label='z-velocity',
time_step=time_step,
x=fluxes[2].x, y=fluxes[2].y, z=fluxes[2].z,
values=uz)
return ux, uy, uz
else:
ux = numpy.empty_like(fluxes[0].values, dtype=numpy.float64)
for i in range(fluxes[0].values.shape[1]):
ux[:, i] = fluxes[0].values[:, i] / dy[:]
ux = Field(label='x-velocity',
time_step=time_step,
x=fluxes[0].x, y=fluxes[0].y,
values=ux)
uy = numpy.empty_like(fluxes[1].values, dtype=numpy.float64)
for j in range(fluxes[1].values.shape[0]):
uy[j, :] = fluxes[1].values[j, :] / dx[:]
uy = Field(label='y-velocity',
time_step=time_step,
x=fluxes[1].x, y=fluxes[1].y,
values=uy)
return ux, uy
def subtract(self, other, field_name, label=None):
"""
Subtracts one field to another in place.
Parameters
----------
other: Simulation object
Simulation to subtract.
field_name: string
Name of the field to subtract;
choices: 'pressure', 'vorticity',
'x-velocity', 'y-velocity',
'x-flux', 'y-flux'.
label: string, optional
Name of the output subtracted field;
default: None.
"""
difference = self.fields[field_name].subtract(other.fields[field_name],
label=label)
self.fields[difference.label] = difference
def get_difference(self, other, field_name, mask=None, norm=None):
"""
Returns the difference in a given norm between a field and another.
Parameters
----------
other: Simulation object
The other solution.
field_name: string
Name of the field to use.
mask: Simulation object, optional
Simulation whose grid will be used to project and compute the difference;
default: None (use grid of present simulation).
norm: string, optional
Norm to use to compute the difference;
default: None.
Returns
-------
difference: float
The difference between the two fields in a given norm.
"""
if mask:
x, y = mask.fields[field_name].x, mask.fields[field_name].y
else:
x, y = self.fields[field_name].x, self.fields[field_name].y
return self.fields[field_name].get_difference(other.fields[field_name],
x=x,
y=y,
norm=norm)
def get_differences(self, other, field_names, mask=None, norm=None):
"""
Returns the difference in a given norm between a field and another.
Parameters
----------
other: Simulation object
The other solution.
field_names: list of strings
Name of the fields to use.
mask: Simulation object, optional
Simulation whose grid will be used to project and compute the difference;
default: None (use grid of present simulation).
norm: string, optional
Norm to use to compute the difference;
default: None.
Returns
-------
differences: dictionary of (string, float) items
The difference between the two fields in a given norm,
for each requested field.
"""
errors = {}
for field_name in field_names:
      errors[field_name] = self.get_difference(other, field_name,
                                                mask=mask, norm=norm)
return errors
def plot_contour(self, field_name,
field_range=None,
filled_contour=True,
view=(None, None, None, None),
bodies=[],
time_increment=None,
save_directory=None, save_name=None, fmt='png',
colorbar=True,
cmap=None,
colors=None,
style=None,
width=8.0,
dpi=100):
"""
Plots and saves the field.
Parameters
----------
field_name: string
Name of the field to plot.
field_range: list of floats, optional
Min value, max value and number of contours to plot;
default: None.
filled_contour: boolean, optional
Set 'True' to create a filled contour;
default: True.
view: tuple or list of 4 floats, optional
Bottom-left and top-right coordinates of the rectangular view to plot;
default: (None, None, None, None), the whole domain.
bodies: list of Body objects, optional
The immersed bodies to add to the figure;
default: [] (no immersed body).
time_increment: float, optional
      Time-increment used to advance the simulation.
If provided, we display the time-unit in an annotation
on the top-left part of the figure;
default: None.
save_directory: string, optional
Directory where to save the figures;
default: None (will be the folder '<simu dir>/images').
save_name: string, optional
Prefix used to create the images directory and to save the files;
default: None (will be the name of the field).
fmt: string, optional
Format of the file to save;
default: 'png'.
colorbar: boolean, optional
      Set 'True' to display a horizontal colorbar
      at the bottom-left of the figure;
default: True.
cmap: string, optional
The Matplotlib colormap to use;
default: None.
colors: string, optional
The Matplotlib colors to use;
default: None.
style: string, optional
Path of the Matplotlib style-sheet to use;
default: None.
width: float, optional
Width of the figure (in inches);
default: 8.
dpi: integer, optional
Dots per inch (resolution);
default: 100
"""
# set view
if isinstance(view, tuple):
view = list(view)
view[0] = (self.grid[0].min() if view[0] is None else view[0])
view[1] = (self.grid[1].min() if view[1] is None else view[1])
view[2] = (self.grid[0].max() if view[2] is None else view[2])
view[3] = (self.grid[1].max() if view[3] is None else view[3])
# create save directory if necessary
if not save_directory:
save_directory = os.path.join(self.directory, 'images')
folder = '{}_{:.2f}_{:.2f}_{:.2f}_{:.2f}'.format(field_name, *view)
save_directory = os.path.join(save_directory, folder)
if not os.path.isdir(save_directory):
os.makedirs(save_directory)
# load matplotlib style if provided and not already loaded
if style and not hasattr(self, 'style_loaded'):
from matplotlib import pyplot
try:
pyplot.style.use(style)
except:
try:
pyplot.style.use(os.path.join(os.environ['SNAKE'],
'snake',
'styles',
style + '.mplstyle'))
except:
print('[warning] could not load the matplotlib style-sheet '
'{}'.format(style))
pass
self.style_loaded = True
# plot contour
self.fields[field_name].plot_contour(field_range=field_range,
filled_contour=filled_contour,
view=view,
bodies=bodies,
time_increment=time_increment,
save_directory=save_directory,
save_name=save_name,
fmt=fmt,
colorbar=colorbar,
cmap=cmap,
colors=colors,
width=width,
dpi=dpi)
def plot_gridline_values(self, field_name,
x=[], y=[],
boundaries=(None, None),
plot_settings={},
plot_limits=(None, None, None, None),
save_directory=None,
show=False,
other_data=None,
other_settings={}):
"""
Plots the field values along either a set of vertical gridlines or a set
of horizontal gridlines.
Parameters
----------
field_name: string
Name of the field to plot.
x: list of floats, optional
List of vertical gridlines defined by their x-position;
default: [].
y: list of floats, optional
List of horizontal gridlines defined by their y-position;
default: [].
boundaries: 2-tuple of floats, optional
Gridline boundaries;
default: (None, None).
plot_settings: dictionary of (string, object) items, optional
Contains optional arguments to call pyplot.plot function for the gridline
data;
default: empty dictionary.
plot_limits: 4-tuple of floats, optional
Limits of the plot (x-start, x-end, y-start, y-end);
default: (None, None, None, None)
save_directory: string, optional
Directory where to save the figure;
default: None (does not save).
show: boolean, optional
Set 'True' if you want to display the figure;
default: False.
other_data: 2-tuple of 1d arrays of floats, optional
Other data to add to the figure (1st array contains the y-stations,
2nd array contains the values at the stations);
default: None.
other_settings: dictionary of (string, object) items, optional
Contains optional arguments to call pyplot.plot function for the other
data;
default: empty dictionary.
"""
if not isinstance(x, (list, tuple)):
x = [x]
if not isinstance(y, (list, tuple)):
y = [y]
if not (x or y):
print('[error] provide either x or y keyword arguments')
return
f = self.fields[field_name]
if x:
f.plot_vertical_gridline_values(x=x,
boundaries=boundaries,
plot_settings=plot_settings,
save_directory=save_directory,
show=show,
other_data=other_data,
other_plot_settings=other_settings)
if y:
f.plot_horizontal_gridline_values(y=y,
boundaries=boundaries,
plot_settings=plot_settings,
save_directory=save_directory,
show=show,
other_data=other_data,
other_plot_settings=other_settings)
def get_velocity_cell_centers(self):
"""
Interpolates the staggered velocity field to the cell-centers of the mesh.
Returns
-------
u, v, w: Field objects
Velocity at cell-centers in the x-, y-, and z-directions.
"""
dim3 = 'z-velocity' in self.fields.keys()
x_centers = self.fields['y-velocity'].x[1:-1]
y_centers = self.fields['x-velocity'].y[1:-1]
u, v = self.fields['x-velocity'].values, self.fields['y-velocity'].values
if dim3:
z_centers = self.fields['x-velocity'].z[1:-1]
w = self.fields['z-velocity'].values
u = 0.5 * (u[1:-1, 1:-1, :-1] + u[1:-1, 1:-1, 1:])
      v = 0.5 * (v[1:-1, :-1, 1:-1] + v[1:-1, 1:, 1:-1])
w = 0.5 * (w[:-1, 1:-1, 1:-1] + w[1:, 1:-1, 1:-1])
# tests
assert (z_centers.size, y_centers.size, x_centers.size) == u.shape
assert (z_centers.size, y_centers.size, x_centers.size) == v.shape
assert (z_centers.size, y_centers.size, x_centers.size) == w.shape
u = Field(label='x-velocity',
time_step=self.fields['x-velocity'].time_step,
x=x_centers, y=y_centers, z=z_centers,
values=u)
v = Field(label='y-velocity',
time_step=self.fields['y-velocity'].time_step,
x=x_centers, y=y_centers, z=z_centers,
values=v)
w = Field(label='z-velocity',
time_step=self.fields['z-velocity'].time_step,
x=x_centers, y=y_centers, z=z_centers,
values=w)
return u, v, w
else:
u = 0.5 * (u[1:-1, :-1] + u[1:-1, 1:])
v = 0.5 * (v[:-1, 1:-1] + v[1:, 1:-1])
# tests
assert (y_centers.size, x_centers.size) == u.shape
assert (y_centers.size, x_centers.size) == v.shape
u = Field(label='x-velocity',
time_step=self.fields['x-velocity'].time_step,
x=x_centers, y=y_centers,
values=u)
      v = Field(label='y-velocity',
time_step=self.fields['y-velocity'].time_step,
x=x_centers, y=y_centers,
values=v)
return u, v
def write_vtk(self, field_name, time_step,
view=[[float('-inf'), float('-inf'), float('-inf')],
[float('inf'), float('inf'), float('inf')]],
stride=1):
"""
Writes the field in a .vtk file.
Parameters
----------
    field_name: string
Name of the field to write; choices: 'velocity', 'pressure'.
time_step: integer
Time-step to write.
view: list of floats, optional
Bottom-left and top-right coordinates of the rectangular view to write;
default: the whole domain.
stride: integer, optional
Stride at which the field is written;
default: 1.
"""
print('[info] writing the {} field into .vtk file ...'.format(field_name))
dim3 = (len(self.grid) == 3)
if field_name == 'velocity':
scalar_field = False
field = [self.fields['x-velocity'], self.fields['y-velocity']]
if dim3:
field.append(self.fields['z-velocity'])
elif field_name == 'pressure':
scalar_field = True
field = [self.fields['pressure']]
# get mask for the view
mx = numpy.where(numpy.logical_and(field[0].x > view[0][0],
field[0].x < view[1][0]))[0][::stride]
my = numpy.where(numpy.logical_and(field[0].y > view[0][1],
field[0].y < view[1][1]))[0][::stride]
if dim3:
mz = numpy.where(numpy.logical_and(field[0].z > view[0][2],
field[0].z < view[1][2]))[0][::stride]
# create directory where .vtk file will be saved
vtk_directory = os.path.join(self.directory, 'vtk_files', field_name)
if not os.path.isdir(vtk_directory):
print('[info] creating directory: {}'.format(vtk_directory))
os.makedirs(vtk_directory)
vtk_file_path = os.path.join(vtk_directory,
'{}{:0>7}.vtk'.format(field_name, time_step))
# get coordinates within the view
x = field[0].x[mx]
y = field[0].y[my]
z = (None if not dim3 else field[0].z[mz])
nx, ny, nz = x.size, y.size, (1 if not dim3 else z.size)
# write .vtk file
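    # legacy ASCII VTK layout: coordinate arrays first, then POINT_DATA values
    # flattened in C order so that x varies fastest, as VTK expects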
with open(vtk_file_path, 'w') as outfile:
outfile.write('# vtk DataFile Version 3.0\n')
outfile.write('contains {} field\n'.format(field_name))
outfile.write('ASCII\n')
outfile.write('DATASET RECTILINEAR_GRID\n')
outfile.write('DIMENSIONS {} {} {}\n'.format(nx, ny, nz))
outfile.write('X_COORDINATES {} double\n'.format(nx))
numpy.savetxt(outfile, x, fmt='%f')
outfile.write('Y_COORDINATES {} double\n'.format(ny))
numpy.savetxt(outfile, y, fmt='%f')
outfile.write('Z_COORDINATES {} double\n'.format(nz))
if dim3:
numpy.savetxt(outfile, z, fmt='%f')
else:
outfile.write('0.0\n')
outfile.write('POINT_DATA {}\n'.format(nx * ny * nz))
if scalar_field:
outfile.write('\nSCALARS {} double 1\nLOOKUP_TABLE default\n'
''.format(field_name))
if dim3:
values = field[0].values[mz[0]:mz[-1] + 1,
my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
else:
values = field[0].values[my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
numpy.savetxt(outfile, values.flatten(),
fmt='%.6f', delimiter='\t')
else:
outfile.write('\nVECTORS {} double\n'.format(field_name))
if dim3:
values_x = field[0].values[mz[0]:mz[-1] + 1,
my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
values_y = field[1].values[mz[0]:mz[-1] + 1,
my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
values_z = field[2].values[mz[0]:mz[-1] + 1,
my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
numpy.savetxt(outfile,
numpy.c_[values_x.flatten(),
values_y.flatten(),
values_z.flatten()],
fmt='%.6f', delimiter='\t')
else:
values_x = field[0].values[my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
values_y = field[1].values[my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
numpy.savetxt(outfile, numpy.c_[values_x.flatten(),
values_y.flatten()],
                        fmt='%.6f', delimiter='\t')
| mit |
huongttlan/statsmodels | statsmodels/discrete/tests/test_sandwich_cov.py | 24 | 18710 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 09 21:29:20 2013
Author: Josef Perktold
"""
import os
import numpy as np
import pandas as pd
import statsmodels.discrete.discrete_model as smd
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.genmod.families import links
from statsmodels.regression.linear_model import OLS
import statsmodels.stats.sandwich_covariance as sc
from statsmodels.base.covtype import get_robustcov_results
from statsmodels.tools.tools import add_constant
from numpy.testing import assert_allclose, assert_equal
import statsmodels.tools._testing as smt
# get data and results as module global for now, TODO: move to class
from .results import results_count_robust_cluster as results_st
cur_dir = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(cur_dir, "results", "ships.csv")
data_raw = pd.read_csv(filepath, index_col=False)
data = data_raw.dropna()
#mod = smd.Poisson.from_formula('accident ~ yr_con + op_75_79', data=dat)
# Don't use formula for tests against Stata because intercept needs to be last
endog = data['accident']
exog_data = data['yr_con op_75_79'.split()]
exog = add_constant(exog_data, prepend=False)
group = np.asarray(data['ship'], int)
exposure = np.asarray(data['service'])
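# 'ship' provides the cluster groups for the cluster-robust covariances;
# 'service' enters the count models as an exposure term.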
# TODO get the test methods from regression/tests
class CheckCountRobustMixin(object):
def test_basic(self):
res1 = self.res1
res2 = self.res2
if len(res1.params) == (len(res2.params) - 1):
# Stata includes lnalpha in table for NegativeBinomial
mask = np.ones(len(res2.params), np.bool_)
mask[-2] = False
res2_params = res2.params[mask]
res2_bse = res2.bse[mask]
else:
res2_params = res2.params
res2_bse = res2.bse
assert_allclose(res1._results.params, res2_params, 1e-4)
assert_allclose(self.bse_rob / self.corr_fact, res2_bse, 6e-5)
@classmethod
def get_robust_clu(cls):
res1 = cls.res1
cov_clu = sc.cov_cluster(res1, group)
cls.bse_rob = sc.se_cov(cov_clu)
nobs, k_vars = res1.model.exog.shape
k_params = len(res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
def test_oth(self):
res1 = self.res1
res2 = self.res2
assert_allclose(res1._results.llf, res2.ll, 1e-4)
assert_allclose(res1._results.llnull, res2.ll_0, 1e-4)
def test_ttest(self):
smt.check_ttest_tvalues(self.res1)
def test_waldtest(self):
smt.check_ftest_pvalues(self.res1)
class TestPoissonClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
class TestPoissonCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
cls.res1 = res1 = mod.fit(disp=False)
debug = False
if debug:
# for debugging
cls.bse_nonrobust = cls.res1.bse.copy()
cls.res1 = res1 = mod.fit(disp=False)
cls.get_robust_clu()
cls.res3 = cls.res1
cls.bse_rob3 = cls.bse_rob.copy()
cls.res1 = res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
nobs, k_vars = res1.model.exog.shape
k_params = len(res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestPoissonHC1Generic(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'HC1', use_self=True)
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
corr_fact = (nobs) / float(nobs - 1.)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(1./corr_fact)
# TODO: refactor xxxFit to full testing results
class TestPoissonCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
# scaling of cov_params_default to match Stata
# TODO should the default be changed?
nobs, k_params = mod.exog.shape
sc_fact = (nobs-1.) / float(nobs - k_params)
cls.res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
scaling_factor=1. / sc_fact,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
# The model results, t_test, ... should also work without
# normalized_cov_params, see #2209
# Note: we cannot set on the wrapper res1, we need res1._results
cls.res1._results.normalized_cov_params = None
cls.bse_rob = cls.res1.bse
# backwards compatibility with inherited test methods
cls.corr_fact = 1
def test_basic_inference(self):
res1 = self.res1
res2 = self.res2
rtol = 1e-7
assert_allclose(res1.params, res2.params, rtol=1e-8)
assert_allclose(res1.bse, res2.bse, rtol=rtol)
assert_allclose(res1.tvalues, res2.tvalues, rtol=rtol, atol=1e-8)
assert_allclose(res1.pvalues, res2.pvalues, rtol=rtol, atol=1e-20)
ci = res2.params_table[:, 4:6]
assert_allclose(res1.conf_int(), ci, rtol=5e-7, atol=1e-20)
class TestPoissonHC1Fit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = smd.Poisson(endog, exog)
cls.res1 = mod.fit(disp=False, cov_type='HC1')
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
corr_fact = (nobs) / float(nobs - 1.)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(1./corr_fact)
class TestPoissonCluExposure(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_exposure_clu #nonrobust
mod = smd.Poisson(endog, exog, exposure=exposure)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
class TestPoissonCluExposureGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_exposure_clu #nonrobust
mod = smd.Poisson(endog, exog, exposure=exposure)
cls.res1 = res1 = mod.fit(disp=False)
from statsmodels.base.covtype import get_robustcov_results
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse #sc.se_cov(cov_clu)
nobs, k_vars = res1.model.exog.shape
k_params = len(res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestGLMPoissonClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = smd.Poisson(endog, exog)
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit()
cls.get_robust_clu()
class TestGLMPoissonCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = res1 = mod.fit()
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
nobs, k_vars = res1.model.exog.shape
k_params = len(res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestGLMPoissonHC1Generic(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit()
#res_hc0_ = cls.res1.get_robustcov_results('HC1')
get_robustcov_results(cls.res1._results, 'HC1', use_self=True)
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
corr_fact = (nobs) / float(nobs - 1.)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(1./corr_fact)
# TODO: refactor xxxFit to full testing results
class TestGLMPoissonCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_clu
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = res1 = mod.fit(cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
# The model results, t_test, ... should also work without
# normalized_cov_params, see #2209
# Note: we cannot set on the wrapper res1, we need res1._results
cls.res1._results.normalized_cov_params = None
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
k_params = len(cls.res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestGLMPoissonHC1Fit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_poisson_hc1
mod = GLM(endog, exog, family=families.Poisson())
cls.res1 = mod.fit(cov_type='HC1')
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
corr_fact = (nobs) / float(nobs - 1.)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(1./corr_fact)
class TestNegbinClu(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
class TestNegbinCluExposure(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_exposure_clu #nonrobust
mod = smd.NegativeBinomial(endog, exog, exposure=exposure)
cls.res1 = mod.fit(disp=False)
cls.get_robust_clu()
# mod_nbe = smd.NegativeBinomial(endog, exog, exposure=data['service'])
# res_nbe = mod_nbe.fit()
# mod_nb = smd.NegativeBinomial(endog, exog)
# res_nb = mod_nb.fit()
#
# cov_clu_nb = sc.cov_cluster(res_nb, group)
# k_params = k_vars + 1
# print sc.se_cov(cov_clu_nb / ((nobs-1.) / float(nobs - k_params)))
#
# wt = res_nb.wald_test(np.eye(len(res_nb.params))[1:3], cov_p=cov_clu_nb/((nobs-1.) / float(nobs - k_params)))
# print wt
#
# print dir(results_st)
class TestNegbinCluGeneric(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = res1 = mod.fit(disp=False)
get_robustcov_results(cls.res1._results, 'cluster',
groups=group,
use_correction=True,
df_correction=True, #TODO has no effect
use_t=False, #True,
use_self=True)
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
k_params = len(cls.res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestNegbinCluFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_clu
mod = smd.NegativeBinomial(endog, exog)
cls.res1 = res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
k_params = len(cls.res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class TestNegbinCluExposureFit(CheckCountRobustMixin):
@classmethod
def setup_class(cls):
cls.res2 = results_st.results_negbin_exposure_clu #nonrobust
mod = smd.NegativeBinomial(endog, exog, exposure=exposure)
cls.res1 = res1 = mod.fit(disp=False, cov_type='cluster',
cov_kwds=dict(groups=group,
use_correction=True,
df_correction=True), #TODO has no effect
use_t=False, #True,
)
cls.bse_rob = cls.res1.bse
nobs, k_vars = mod.exog.shape
k_params = len(cls.res1.params)
#n_groups = len(np.unique(group))
corr_fact = (nobs-1.) / float(nobs - k_params)
# for bse we need sqrt of correction factor
cls.corr_fact = np.sqrt(corr_fact)
class CheckDiscreteGLM(object):
# compare GLM with other models, no verified reference results
def test_basic(self):
res1 = self.res1
res2 = self.res2
assert_equal(res1.cov_type, self.cov_type)
assert_equal(res2.cov_type, self.cov_type)
assert_allclose(res1.params, res2.params, rtol=1e-13)
# bug TODO res1.scale missing ? in Gaussian/OLS
assert_allclose(res1.bse, res2.bse, rtol=1e-13)
# if not self.cov_type == 'nonrobust':
# assert_allclose(res1.bse * res1.scale, res2.bse, rtol=1e-13)
# else:
# assert_allclose(res1.bse, res2.bse, rtol=1e-13)
class TestGLMLogit(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
endog_bin = (endog > endog.mean()).astype(int)
cls.cov_type = 'cluster'
mod1 = GLM(endog_bin, exog, family=families.Binomial())
cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
mod1 = smd.Logit(endog_bin, exog)
cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
class T_estGLMProbit(CheckDiscreteGLM):
# invalid link. What's Probit as GLM?
@classmethod
def setup_class(cls):
endog_bin = (endog > endog.mean()).astype(int)
cls.cov_type = 'cluster'
mod1 = GLM(endog_bin, exog, family=families.Gaussian(link=links.CDFLink))
cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
mod1 = smd.Probit(endog_bin, exog)
cls.res2 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
class TestGLMGaussNonRobust(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'nonrobust'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit()
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit()
class TestGLMGaussClu(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'cluster'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='cluster', cov_kwds=dict(groups=group))
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='cluster', cov_kwds=dict(groups=group))
class TestGLMGaussHC(CheckDiscreteGLM):
@classmethod
def setup_class(cls):
cls.cov_type = 'HC0'
mod1 = GLM(endog, exog, family=families.Gaussian())
cls.res1 = mod1.fit(cov_type='HC0')
mod2 = OLS(endog, exog)
cls.res2 = mod2.fit(cov_type='HC0')
if __name__ == '__main__':
tt = TestPoissonClu()
tt.setup_class()
tt.test_basic()
tt = TestNegbinClu()
tt.setup_class()
tt.test_basic()
| bsd-3-clause |
meduz/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 39 | 36062 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
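# small toy corpora shared by the vectorizer tests below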
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = '\u0627'  # simple alef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = ''  # alef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizer give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are high frequent across the complete corpus are likely
# to be not informative (either real stop words of extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = choice(vocab_words, size=5, replace=False, random_state=rng)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
def test_vectorizer_string_object_as_input():
message = ("Iterable over raw text documents expected, "
"string object received.")
for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
assert_raise_message(
ValueError, message, vec.fit_transform, "hello world!")
assert_raise_message(
ValueError, message, vec.fit, "hello world!")
assert_raise_message(
ValueError, message, vec.transform, "hello world!")
| bsd-3-clause |
brentp/clustermodel | clustermodel/__main__.py | 1 | 18861 | import sys
import gzip
import re
from itertools import groupby, izip_longest
from collections import OrderedDict
import numpy as np
import pandas as pd
from aclust import mclust
from .plotting import plot_dmr, plot_hbar, plot_continuous
from . import feature_gen, cluster_to_dataframe, clustered_model, CPUS
from .clustermodel import r
xopen = lambda f: gzip.open(f) if f.endswith('.gz') else open(f)
def is_numeric(pd_series):
if np.issubdtype(pd_series.dtype, int) or \
np.issubdtype(pd_series.dtype, float):
return len(pd_series.unique()) > 2
return False
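# Note on usage (illustrative): is_numeric() decides whether a covariate is
# treated as continuous when plotting. A numeric series with only two unique
# values (e.g. a 0/1 disease indicator) returns False and is handled as
# categorical.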
def run_model(clusters, covs, model, X, outlier_sds, combine, bumping, betareg,
gee_args, skat, counts):
# we turn the cluster list into a pandas dataframe with columns
# of samples and rows of probes. these must match our covariates
cluster_dfs = [cluster_to_dataframe(cluster, columns=covs.index)
for cluster in clusters]
if clusters[0][0].weights is not None:
weight_dfs = [cluster_to_dataframe(cluster, columns=covs.index,
weights=True)
for cluster in clusters]
else:
weight_dfs = None
# now we want to test a model on our clustered dataset.
res = clustered_model(covs, cluster_dfs, model, X=X, weights=weight_dfs,
gee_args=gee_args, combine=combine, bumping=bumping,
betareg=betareg,
skat=skat, counts=counts, outlier_sds=outlier_sds)
res['chrom'], res['start'], res['end'], res['n_probes'] = ("CHR", 1, 1, 0)
if "cluster_id" in res.columns:
# start at 1 because we using 1:nclusters in R
for i, c in enumerate(clusters, start=1):
res.ix[res.cluster_id == i, 'chrom'] = c[0].group
res.ix[res.cluster_id == i, 'start'] = c[0].start
res.ix[res.cluster_id == i, 'end'] = c[-1].end
res.ix[res.cluster_id == i, 'n_probes'] = len(c)
else:
assert len(clusters) == 1
res['chrom'] = clusters[0][0].group
res['start'] = clusters[0][0].start
res['end'] = clusters[-1][-1].end
res['n_probes'] = len(clusters[0])
return res
def distX(dmr, expr):
strand = str(expr.get('strand', '+'))
if strand not in "+-": strand = "+"
dmr['distance'] = 0
if dmr['end'] < expr['start']:
dmr['distance'] = expr['start'] - dmr['end']
# dmr is left of gene. that means it is upstream if strand is +
# we use "-" for upstream
if strand == "+":
dmr['distance'] *= -1
elif dmr['start'] > expr['end']:
dmr['distance'] = dmr['start'] - expr['end']
# dmr is right of gene. that is upstream if strand is -
# use - for upstream
if strand == "-":
dmr['distance'] *= -1
dmr['Xstart'], dmr['Xend'], dmr['Xstrand'] = expr['start'], expr['end'], expr['strand']
dmr['Xname'] = expr.get('name', expr.get('gene', dmr.get('X', 'NA')))
if dmr['chrom'] != expr['chrom']:
dmr['distance'] = np.nan
def clustermodel(fcovs, fmeth, model,
# clustering args
max_dist=200, linkage='complete', rho_min=0.32,
min_clust_size=1,
merge_linkage=None,
max_merge_dist=0,
counts=False,
sep="\t",
X=None, X_locs=None, X_dist=None,
weights=None,
outlier_sds=None,
combine=False, bumping=False, betareg=False,
gee_args=(), skat=False,
png_path=None):
# an iterable of feature objects
# from here, weights are attached to the feature.
feature_iter = feature_gen(fmeth, rho_min=rho_min, weights=weights)
assert min_clust_size >= 1
cluster_gen = (c for c in mclust(feature_iter,
max_dist=max_dist,
linkage=linkage,
merge_linkage=merge_linkage,
max_merge_dist=max_merge_dist
)
if len(c) >= min_clust_size)
for res in clustermodelgen(fcovs, cluster_gen, model, sep=sep,
X=X, X_locs=X_locs, X_dist=X_dist,
outlier_sds=outlier_sds,
combine=combine, bumping=bumping, betareg=betareg,
gee_args=gee_args, skat=skat, counts=counts, png_path=png_path):
yield res
def fix_name(name, patt=re.compile("-|:| ")):
"""
>>> fix_name('asd f')
'asd.f'
>>> fix_name('asd-f')
'asd.f'
>>> fix_name('a:s:d-f')
'a.s.d.f'
"""
return re.sub(patt, ".", name)
def groups_of(n, iterable):
args = [iter(iterable)] * n
for x in izip_longest(*args):
yield [v for v in x if v is not None]
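# Illustrative sketch of groups_of() with made-up values:
#   list(groups_of(2, [1, 2, 3, 4, 5]))  ->  [[1, 2], [3, 4], [5]]
# izip_longest pads the last chunk with None and the Nones are filtered out,
# so the final group is simply shorter when len(iterable) is not a multiple
# of n.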
def clustermodelgen(fcovs, cluster_gen, model, sep="\t",
X=None, X_locs=None, X_dist=None,
outlier_sds=None,
combine=False, bumping=False,
betareg=False, gee_args=(), skat=False,
counts=False,
png_path=None):
covs = (pd.read_csv if fcovs.endswith(".csv") else pd.read_table)(fcovs, index_col=0)
covariate = model.split("~")[1].split("+")[0].strip()
Xvar = X
if X is not None:
# read in once in R, then subset by probes
r('Xfull = readX("%s")' % X)
Xvar = 'Xfull'
# read expression into memory and pull out subsets as needed.
if not X_locs is None:
# change names so R formulas are OK
X_locs = pd.read_table(xopen(X_locs), index_col="probe")
X_locs.ix[:, 0] = map(str, X_locs.ix[:, 0])
X_locs.index = [fix_name(xi) for xi in X_locs.index]
# just reading in the first column to make sure we're using probes that
# exist in the X matrix
Xi = pd.read_table(xopen(X), index_col=0, usecols=[0]).index
X_probes = set([fix_name(xi) for xi in Xi])
# weights are attached to the feature
for clusters in groups_of(50 * CPUS if X is None else
8 * CPUS if X_locs is not None
else CPUS, cluster_gen):
if not X_locs is None:
probes = []
# here, we take any X probe that's associated with any single
# cluster and test it against all clusters. This tends to work out
# because the clusters are sorted by location and it helps
# parallelization.
for cluster in clusters:
chrom = cluster[0].group
start, end = cluster[0].start, cluster[-1].end
if X_dist is not None:
probe_locs = X_locs[((X_locs.ix[:, 0] == chrom) &
(X_locs.ix[:, 1] < (end + X_dist)) &
(X_locs.ix[:, 2] > (start - X_dist)))]
probes.extend([p for p in probe_locs.index if p in X_probes])
if X_dist is None:
probe_locs = X_locs
probes = list(probe_locs.index)
if len(probes) == 0: continue
probes = OrderedDict.fromkeys(probes).keys()
# we send do the extraction directly in R so the only data
# sent is the name of the probes. Then we take the subset
# inside R
r['XXprobes'] = probes
Xvar = 'Xfull[XXprobes,,drop=FALSE]'
if gee_args and isinstance(gee_args, basestring):
gee_args = gee_args.split(",")
res = run_model(clusters, covs, model, Xvar, outlier_sds, combine,
bumping, betareg, gee_args, skat, counts)
j = 0
for i, row in res.iterrows():
row = dict(row)
if X_locs is not None:
distX(row, dict(X_locs.ix[row['X'], :]))
if np.isnan(row['distance']) or abs(row['distance']) > X_dist: continue
yield row
# blech. steal regions since we often want to plot everything.
if (row['p'] < 1e-4 or "--regions" in sys.argv) and png_path:
if 'X' in row and row['p'] > 1e-8: continue
cluster_df = cluster_to_dataframe(clusters[j], columns=covs.index)
weights_df = None
if clusters[j][0].weights is not None:
weights_df = cluster_to_dataframe(clusters[j],
columns=covs.index, weights=True)
plot_res(row, png_path, covs, covariate, cluster_df, weights_df)
j += 1
def plot_res(res, png_path, covs, covariate, cluster_df, weights_df=None):
from matplotlib import pyplot as plt
from mpltools import style
style.use('ggplot')
region = "{chrom}_{start}_{end}".format(**res)
if png_path.endswith('show'):
png = None
elif png_path.endswith(('.png', '.pdf')):
png = "%s.%s%s" % (png_path[:-4], region, png_path[-4:])
elif png_path:
png = "%s.%s.png" % (png_path.rstrip("."), region)
if is_numeric(getattr(covs, covariate)):
f = plot_continuous(covs, cluster_df, covariate, res['chrom'], res, png)
else:
f = plt.figure(figsize=(11, 4))
ax = f.add_subplot(1, 1, 1)
if 'spaghetti' in png_path and cluster_df.shape[0] > 1:
plot_dmr(covs, cluster_df, covariate, res['chrom'], res, png,
weights_df)
else:
plot_hbar(covs, cluster_df, covariate, res['chrom'], res, png)
plt.title('p-value: %.3g %s: %.3f' % (res['p'], covariate, res['coef']))
f.set_tight_layout(True)
if png:
plt.savefig(png)
else:
plt.show()
plt.close()
def main_example():
fcovs = "clustermodel/tests/example-covariates.txt"
fmeth = "clustermodel/tests/example-methylation.txt.gz"
model = "methylation ~ disease + gender"
for cluster_p in clustermodel(fcovs, fmeth, model):
if cluster_p['p'] < 1e-5:
print(cluster_p)
def add_modelling_args(p):
mp = p.add_argument_group('modeling choices (choose one or specify a '
'mixed-model using lme4 syntax)')
group = mp.add_mutually_exclusive_group()
group.add_argument('--skat', action='store_true')
group.add_argument('--gee-args',
help='comma-delimited correlation-structure, variable')
group.add_argument('--combine', choices=('liptak', 'z-score'))
group.add_argument('--bumping', action="store_true")
p.add_argument('--counts', action="store_true",
help="y is count data. model must be a mixed-effect model")
p.add_argument('--betareg', action="store_true",
help="use beta-regression in which case `methylation` should be"
" the ratio and --weights could be the read-depths.")
p.add_argument('model',
help="model in R syntax, e.g. 'methylation ~ disease'")
p.add_argument('covs', help="tab-delimited file of covariates: shape is "
"n_samples * n_covariates")
p.add_argument('methylation', help="tab-delimited file of methylation"
" rows of this file must match the columns of `covs`"
" shape is n_probes * n_samples")
def add_expression_args(p):
ep = p.add_argument_group('optional expression parameters')
    ep.add_argument('--X', help='matrix file with the same sample columns as '
        'methylation, with values of e.g. expression. Will perform a '
        'methyl-eQTL for each DMR. As such, it is best to run this on '
        'subsets of data, e.g. only looking for cis relationships')
ep.add_argument('--X-locs', help="BED file with locations of probes from"
" the first column in --X. Should have a 'probe' column header")
ep.add_argument('--X-dist', type=int, help="only look at cis interactions"
" between X and methylation sites with this as the maximum",
default=None)
def add_weight_args(p):
wp = p.add_argument_group('weighted regression')
wp.add_argument('--weights', help="matrix file with of shape probes * "
"samples with values for weights in the regression. Likely these "
"would be read-counts (depth) for BS-Seq data.")
def add_clustering_args(p):
cp = p.add_argument_group('clustering parameters')
cp.add_argument('--rho-min', type=float, default=0.32,
help="minimum correlation to merge 2 probes")
cp.add_argument('--min-cluster-size', type=int, default=1)
cp.add_argument('--linkage', choices=['single', 'complete'],
default='complete', help="linkage method")
cp.add_argument('--max-dist', default=200, type=int,
help="never merge probes this distant")
cp.add_argument('--merge-linkage', default=0.24, type=float,
help='value between 0 and 1 indicating percentage of probes '
'that must be correlated to merge 2 clusters')
cp.add_argument('--max-merge-dist', default=None, type=int,
help='max distance between 2 already defined clusters that '
             'could be merged based on --merge-linkage. Must be a number'
             ' larger than max-dist. Default is 1.5 * max-dist')
def add_misc_args(p):
p.add_argument('--png-path',
help="""path to save a png of regions with low p-values. Use
'show' to plot in GUI. If this contains the string 'spaghetti', it will draw a
spaghetti plot, otherwise, it's a histogram plot""")
p.add_argument('--outlier-sds', type=float, default=30,
help="remove points that are more than this many standard "
"deviations away from the mean")
def get_method(a, n_probes=None):
if a.gee_args is not None:
method = 'gee:' + ",".join(a.gee_args)
else:
if a.combine:
method = a.combine
if a.betareg:
if n_probes > 1:
method += "/beta-regression"
else:
method = "beta-regression"
elif a.bumping: method = 'bumping'
elif a.skat: method = 'skat'
else:
assert "|" in a.model
method = "mixed-model"
if n_probes == 1 and method != "beta-regression":
method = "lm"
return method
def gen_clusters_from_regions(feature_iter, regions):
header = xopen(regions).next().split("\t")
has_header = not (header[1].isdigit() and header[2].isdigit())
regions = pd.read_table(regions, header=0 if has_header else False)
regions.columns = 'chrom start end'.split() + list(regions.columns[3:])
regions['region'] = ['%s:%i-%i' % t for t in zip(regions['chrom'],
regions['start'],
regions['end'])]
def by_region(feat):
sub = regions[((regions['chrom'] == feat.group) &
(feat.start <= regions['end']) &
(feat.end >= regions['start']))]['region']
sub = list(sub)
if len(sub) == 0: return False
assert len(sub) == 1, (feat, "overlaps multiple regions")
return str(sub[0])
# TODO: send the region back to the caller as well
for region, cluster in groupby(feature_iter, by_region):
if not region: continue
yield list(cluster)
def main(args=sys.argv[1:]):
import argparse
p = argparse.ArgumentParser(__doc__)
add_modelling_args(p)
if not "--regions" in args:
add_clustering_args(p)
else:
# want to specify existing regions, not use found ones.
p.add_argument('--regions', required=True,
help="BED file of regions to test", metavar="BED")
add_misc_args(p)
add_expression_args(p)
add_weight_args(p)
a = p.parse_args(args)
if a.gee_args:
a.gee_args = a.gee_args.split(",")
if a.betareg and not a.combine:
sys.stderr.write("must specifiy a --combine argument when using"
" beta-regression\n")
sys.exit(p.print_usage())
if not "--regions" in args and a.max_merge_dist is None:
a.max_merge_dist = 1.5 * a.max_dist
fmt = "{chrom}\t{start}\t{end}\t{coef}\t{p}\t{icoef}\t{n_probes}\t{model}\t{covariate}\t{method}"
if a.betareg:
fmt = "{chrom}\t{start}\t{end}\t{coef}\t{p}\t{n_probes}\t{model}\t{covariate}\t{method}"
if a.X_locs:
fmt += "\t{Xname}\t{Xstart}\t{Xend}\t{Xstrand}\t{distance}"
print("#" + fmt.replace("}", "").replace("{", ""))
if "--regions" in args:
# fmt = "{chrom}\t{start}\t{end}\t{coef}\t{p}\t{icoef}\t{n_probes}\t{model}\t{method}"
feature_iter = feature_gen(a.methylation, weights=a.weights)
cluster_gen = gen_clusters_from_regions(feature_iter, a.regions)
for c in clustermodelgen(a.covs, cluster_gen, a.model,
X=a.X,
X_locs=a.X_locs,
X_dist=a.X_dist,
outlier_sds=a.outlier_sds,
combine=a.combine,
bumping=a.bumping,
betareg=a.betareg,
gee_args=a.gee_args,
skat=a.skat,
counts=a.counts,
png_path=a.png_path):
c['method'] = get_method(a, c['n_probes'])
print(fmt.format(**c))
else:
for c in clustermodel(a.covs, a.methylation, a.model,
max_dist=a.max_dist,
linkage=a.linkage,
rho_min=a.rho_min,
min_clust_size=a.min_cluster_size,
merge_linkage=a.merge_linkage,
max_merge_dist=a.max_merge_dist,
combine=a.combine,
bumping=a.bumping,
betareg=a.betareg,
gee_args=a.gee_args,
skat=a.skat,
counts=a.counts,
X=a.X,
X_locs=a.X_locs,
X_dist=a.X_dist,
weights=a.weights,
outlier_sds=a.outlier_sds,
png_path=a.png_path):
c['method'] = get_method(a, c['n_probes'])
print(fmt.format(**c))
if __name__ == "__main__":
import sys
if len(sys.argv) > 1 and sys.argv[1] == "example":
sys.exit(main_example())
if len(sys.argv) > 1 and sys.argv[1] == "simulate":
from . import simulate
sys.exit(simulate.main(sys.argv[2:]))
# want to specify existing regions, not use found ones.
main()
| bsd-3-clause |
google-research/google-research | aqt/utils/analysis_utils.py | 1 | 4593 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of commonly used convenience functions for experiment analysis."""
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
import dataclasses
import pandas as pd
import tree
from aqt.utils import pandas_utils
from aqt.utils import report_utils
def flatten_with_joined_string_paths(
dictionary):
"""Flattens nested dict to single level dict with joined paths as keys."""
flattened = tree.flatten_with_path(structure=dictionary)
flattened_dict = {}
# join path tuples to single string
for path_tuple, val in flattened:
# convert all path elements to strings
path = [str(s) for s in path_tuple]
path = '/'.join(path)
flattened_dict[path] = val
return flattened_dict
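# Illustrative sketch of the flattening with hypothetical values:
#   flatten_with_joined_string_paths({'a': {'b': 1, 'c': 2}, 'd': 3})
#   ->  {'a/b': 1, 'a/c': 2, 'd': 3}
# Nested metric names are joined with '/' so that each leaf value can later
# become a single dataframe column.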
def convert_report_to_flat_dict_default(
report):
"""Selects subset of report and flattens it to a single level dict.
This function selects all information except what's stored under the fields
`report_query_args` and `metadata_corp`.
This function serves as an example for how to parse an ExperimentReport
into a dataframe row by flattening it to a row_dict, with keys corresponding
to dataframe columns.
  The ExperimentReport dataclass likely contains more information than you need
for your analysis, so you can write your own function to pick and choose the
information you want. You can refer to report_utils.ExperimentReport for
documentation of all available fields.
You can pass your custom function into convert_reports_to_dataframe().
Args:
report: An instance of ExperimentReport.
Returns:
A flattened dict representing a dataframe row.
"""
row_dict = {}
# Add smoothed metrics if present
if report.metrics is not None:
flattened_metrics = dict(flatten_with_joined_string_paths(report.metrics))
# merge dicts
row_dict = {**row_dict, **flattened_metrics}
# Add unsmoothed metrics if present
if report.unsmoothed_metrics is not None:
flattened_unsmoothed_metrics = dict(
flatten_with_joined_string_paths(report.unsmoothed_metrics))
flattened_unsmoothed_metrics = {
f'unsmoothed/{k}': v for k, v in flattened_unsmoothed_metrics.items()
}
# merge dicts
row_dict = {**row_dict, **flattened_unsmoothed_metrics}
# Ignore following fields because they have already been added, or we chose
# not to include them.
report_fields_to_ignore = {
'metrics',
'unsmoothed_metrics',
}
# Add other report fields.
for field in dataclasses.fields(report):
if field.name not in report_fields_to_ignore:
row_dict[field.name] = getattr(report, field.name)
return row_dict
def convert_reports_to_dataframe(
reports,
convert_report_to_flat_dict_fn = convert_report_to_flat_dict_default
):
"""Converts a list of ExperimentReport instances to a pandas dataframe.
Args:
reports: List of ExperimentReport instances. Each instance will correspond
to a row in the dataframe.
convert_report_to_flat_dict_fn: Function to use for converting an
ExperimentReport to a flat dict, which will then be read in as a pandas
dataframe row. The keys in the flat dict are interpreted as column names,
the values as entries for that row. Please refer to
`convert_report_to_flat_dict_default()` as an example.
Returns:
A pandas dataframe populated with information extracted from the reports.
"""
rows = [convert_report_to_flat_dict_fn(rep) for rep in reports]
return pd.DataFrame(rows)
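# Minimal usage sketch; `my_reports` and `my_flatten_fn` are hypothetical names
# standing in for a list of ExperimentReport instances and a custom flattener:
#   df = convert_reports_to_dataframe(my_reports)
#   df = convert_reports_to_dataframe(my_reports,
#                                     convert_report_to_flat_dict_fn=my_flatten_fn)
# Each report becomes one row; the flattening function determines the columns.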
def clickable_link(link, display_str = 'link'):
"""Converts a link string into a clickable link with html tag.
WARNING: This function is not safe to use for untrusted inputs since the
generated HTML is not sanitized.
Usage:
df.style.format(clickable_link, subset=['col_name'])
Args:
link: A link string without formatting.
display_str: What text the link should display.
Returns:
HTML-formatted link.
"""
return f'<a href="{link}">{display_str}</a>'
| apache-2.0 |
RobertABT/heightmap | build/matplotlib/examples/event_handling/pipong.py | 6 | 8812 | #!/usr/bin/env python
# A matplotlib-based game of Pong illustrating one way to write interactive
# animations which are easily ported to multiple backends
# pipong.py was written by Paul Ivanov <http://pirsquared.org>
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import randn, randint
instructions = """
Player A: Player B:
'e' up 'i'
'd' down 'k'
press 't' -- close these instructions
(animation will be much faster)
press 'a' -- add a puck
press 'A' -- remove a puck
press '1' -- slow down all pucks
press '2' -- speed up all pucks
press '3' -- slow down distractors
press '4' -- speed up distractors
press ' ' -- reset the first puck
press 'n' -- toggle distractors on/off
press 'g' -- toggle the game on/off
"""
class Pad(object):
def __init__(self, disp,x,y,type='l'):
self.disp = disp
self.x = x
self.y = y
self.w = .3
self.score = 0
self.xoffset = 0.3
self.yoffset = 0.1
if type=='r':
self.xoffset *= -1.0
if type=='l' or type=='r':
self.signx = -1.0
self.signy = 1.0
else:
self.signx = 1.0
self.signy = -1.0
def contains(self, loc):
return self.disp.get_bbox().contains(loc.x,loc.y)
class Puck(object):
def __init__(self, disp, pad, field):
self.vmax= .2
self.disp = disp
self.field = field
self._reset(pad)
def _reset(self,pad):
self.x = pad.x + pad.xoffset
if pad.y < 0:
self.y = pad.y + pad.yoffset
else:
self.y = pad.y - pad.yoffset
self.vx = pad.x - self.x
self.vy = pad.y + pad.w/2 - self.y
self._speedlimit()
self._slower()
self._slower()
def update(self,pads):
self.x += self.vx
self.y += self.vy
for pad in pads:
if pad.contains(self):
self.vx *= 1.2 *pad.signx
self.vy *= 1.2 *pad.signy
fudge = .001
#probably cleaner with something like...if not self.field.contains(self.x, self.y):
if self.x < 0+fudge:
#print "player A loses"
pads[1].score += 1;
self._reset(pads[0])
return True
if self.x > 7-fudge:
#print "player B loses"
pads[0].score += 1;
self._reset(pads[1])
return True
if self.y < -1+fudge or self.y > 1-fudge:
self.vy *= -1.0
# add some randomness, just to make it interesting
self.vy -= (randn()/300.0 + 1/300.0) * np.sign(self.vy)
self._speedlimit()
return False
def _slower(self):
self.vx /= 5.0
self.vy /= 5.0
def _faster(self):
self.vx *= 5.0
self.vy *= 5.0
def _speedlimit(self):
if self.vx > self.vmax:
self.vx = self.vmax
if self.vx < -self.vmax:
self.vx = -self.vmax
if self.vy > self.vmax:
self.vy = self.vmax
if self.vy < -self.vmax:
self.vy = -self.vmax
class Game(object):
def __init__(self, ax):
# create the initial line
self.ax = ax
padAx = padBx= .50
padAy = padBy= .30
padBx+=6.3
pA, = self.ax.barh(padAy,.2, height=.3,color='k', alpha=.5, edgecolor='b',lw=2,label="Player B", animated=True)
pB, = self.ax.barh(padBy,.2, height=.3, left=padBx, color='k',alpha=.5, edgecolor='r',lw=2,label="Player A",animated=True)
# distractors
self.x = np.arange(0,2.22*np.pi,0.01)
self.line, = self.ax.plot(self.x, np.sin(self.x),"r", animated=True, lw=4)
self.line2, = self.ax.plot(self.x, np.cos(self.x),"g", animated=True, lw=4)
self.line3, = self.ax.plot(self.x, np.cos(self.x),"g", animated=True, lw=4)
self.line4, = self.ax.plot(self.x, np.cos(self.x),"r", animated=True, lw=4)
self.centerline,= self.ax.plot([3.5,3.5], [1,-1],'k',alpha=.5, animated=True, lw=8)
self.puckdisp = self.ax.scatter([1],[1],label='_nolegend_', s=200,c='g',alpha=.9,animated=True)
self.canvas = self.ax.figure.canvas
self.background = None
self.cnt = 0
self.distract = True
self.res = 100.0
self.on = False
self.inst = True # show instructions from the beginning
self.background = None
self.pads = []
self.pads.append( Pad(pA,0,padAy))
self.pads.append( Pad(pB,padBx,padBy,'r'))
self.pucks =[]
self.i = self.ax.annotate(instructions,(.5,0.5),
name='monospace',
verticalalignment='center',
horizontalalignment='center',
multialignment='left',
textcoords='axes fraction',animated=True )
self.canvas.mpl_connect('key_press_event', self.key_press)
def draw(self, evt):
draw_artist = self.ax.draw_artist
if self.background is None:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
# restore the clean slate background
self.canvas.restore_region(self.background)
# show the distractors
if self.distract:
self.line.set_ydata(np.sin(self.x+self.cnt/self.res))
self.line2.set_ydata(np.cos(self.x-self.cnt/self.res))
self.line3.set_ydata(np.tan(self.x+self.cnt/self.res))
self.line4.set_ydata(np.tan(self.x-self.cnt/self.res))
draw_artist(self.line)
draw_artist(self.line2)
draw_artist(self.line3)
draw_artist(self.line4)
# show the instructions - this is very slow
if self.inst:
self.ax.draw_artist(self.i)
# pucks and pads
if self.on:
self.ax.draw_artist(self.centerline)
for pad in self.pads:
pad.disp.set_y(pad.y)
pad.disp.set_x(pad.x)
self.ax.draw_artist(pad.disp)
for puck in self.pucks:
if puck.update(self.pads):
# we only get here if someone scored
self.pads[0].disp.set_label(" "+ str(self.pads[0].score))
self.pads[1].disp.set_label(" "+ str(self.pads[1].score))
self.ax.legend(loc='center')
self.leg = self.ax.get_legend()
#self.leg.draw_frame(False) #don't draw the legend border
self.leg.get_frame().set_alpha(.2)
plt.setp(self.leg.get_texts(),fontweight='bold',fontsize='xx-large')
self.leg.get_frame().set_facecolor('0.2')
self.background = None
self.ax.figure.canvas.draw()
return True
puck.disp.set_offsets([puck.x,puck.y])
self.ax.draw_artist(puck.disp)
# just redraw the axes rectangle
self.canvas.blit(self.ax.bbox)
if self.cnt==50000:
# just so we don't get carried away
print("...and you've been playing for too long!!!")
plt.close()
self.cnt += 1
return True
def key_press(self,event):
if event.key == '3':
self.res *= 5.0
if event.key == '4':
self.res /= 5.0
if event.key == 'e':
self.pads[0].y += .1
if self.pads[0].y > 1 - .3:
self.pads[0].y = 1-.3
if event.key == 'd':
self.pads[0].y -= .1
if self.pads[0].y < -1:
self.pads[0].y = -1
if event.key == 'i':
self.pads[1].y += .1
if self.pads[1].y > 1 - .3:
self.pads[1].y = 1-.3
if event.key == 'k':
self.pads[1].y -= .1
if self.pads[1].y < -1:
self.pads[1].y = -1
if event.key == 'a':
self.pucks.append(Puck(self.puckdisp,self.pads[randint(2)],self.ax.bbox))
if event.key == 'A' and len(self.pucks):
self.pucks.pop()
if event.key == ' ' and len(self.pucks):
self.pucks[0]._reset(self.pads[randint(2)])
if event.key == '1':
for p in self.pucks:
p._slower()
if event.key == '2':
for p in self.pucks:
p._faster()
if event.key == 'n':
self.distract = not self.distract
if event.key == 'g':
#self.ax.clear()
self.on = not self.on
if event.key == 't':
self.inst = not self.inst
self.i.set_visible(self.i.get_visible())
if event.key == 'q':
plt.close()
| mit |
miloharper/neural-network-animation | matplotlib/offsetbox.py | 11 | 53384 | """
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a relative position to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import warnings
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch, FancyArrowPatch
from matplotlib import rcParams
from matplotlib import docstring
#from bboximage import BboxImage
from matplotlib.image import BboxImage
from matplotlib.patches import bbox_artist as mbbox_artist
from matplotlib.text import _AnnotationBase
DEBUG = False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
    Given a list of (width, xdescent) of each box, calculate the
    total width and the x-offset position of each item according to
    *mode*. xdescent is analogous to the usual descent, but along the
    x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = list(zip(*wd_list))
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
if len(w_list) > 1:
sep = (total - sum(w_list)) / (len(w_list) - 1.)
else:
sep = 0.
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh + sep) * len(w_list)
else:
sep = float(total) / (len(w_list)) - maxh
offsets = np.array([(maxh + sep) * i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
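# A quick illustration of the "fixed" packing mode above (numbers are
# illustrative only): three boxes of widths 10, 20 and 5 with sep=2 give
#
#     total, offsets = _get_packed_offsets([(10, 0), (20, 0), (5, 0)],
#                                          total=None, sep=2)
#     # total   -> 39 (10 + 20 + 5 plus two gaps of 2)
#     # offsets -> array([ 0, 12, 34])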
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
    Given a list of (height, descent) of each box, align the boxes
    with *align* and calculate the y-offsets of each box.
    *hd_list* : list of (height, ydescent) of boxes to be aligned.
    *height* : intended total height. None if not used.
    *align* : align mode. 'baseline', 'left', 'top', 'right', 'bottom',
    or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h - d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left", "top"]:
descent = 0.
offsets = [d for h, d in hd_list]
elif align in ["right", "bottom"]:
descent = 0.
offsets = [height - h + d for h, d in hd_list]
elif align == "center":
descent = 0.
offsets = [(height - h) * .5 + d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
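# A quick illustration of the alignment logic above (numbers are
# illustrative only): two boxes of heights 10 and 20 (zero descent),
# center-aligned within an automatically determined height:
#
#     height, descent, offsets = _get_aligned_offsets([(10, 0), (20, 0)],
#                                                     height=None,
#                                                     align="center")
#     # height -> 20, descent -> 0.0, offsets -> [5.0, 0.0]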
class OffsetBox(martist.Artist):
"""
    The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a relative position to its parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
        # Clipping has not been implemented in the OffsetBox family, so
# disable the clip flag for consistency. It can always be turned back
# on to zero effect.
self.set_clip_on(False)
self._children = []
self._offset = (0, 0)
def __getstate__(self):
state = martist.Artist.__getstate__(self)
# pickle cannot save instancemethods, so handle them here
from .cbook import _InstanceMethodPickler
import inspect
offset = state['_offset']
if inspect.ismethod(offset):
state['_offset'] = _InstanceMethodPickler(offset)
return state
def __setstate__(self, state):
self.__dict__ = state
from .cbook import _InstanceMethodPickler
if isinstance(self._offset, _InstanceMethodPickler):
self._offset = self._offset.get_instancemethod()
def set_figure(self, fig):
"""
Set the figure
        accepts a :class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def contains(self, mouseevent):
for c in self.get_children():
a, b = c.contains(mouseevent)
if a:
return a, b
return False, {}
def set_offset(self, xy):
"""
Set the offset
        accepts an (x, y) tuple or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent, renderer):
"""
Get the offset
accepts extent of the box
"""
if six.callable(self._offset):
return self._offset(width, height, xdescent, ydescent, renderer)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_visible_children(self):
"""
Return a list of visible artists it contains.
"""
return [c for c in self._children if c.get_visible()]
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of the box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd, renderer)
return mtransforms.Bbox.from_bounds(px - xd, py - yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
Width and height of the container box, calculated if
`None`.
align : str, optional
Alignment of boxes. Can be one of ``top``, ``bottom``,
``left``, ``right``, ``center`` and ``baseline``
mode : str, optional
Packing mode.
Notes
-----
        *pad* and *sep* need to be given in points and will be scaled
        with the renderer dpi, while *width* and *height* need to be in
        pixels.
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
    The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
width and height of the container box, calculated if
`None`.
align : str, optional
Alignment of boxes.
mode : str, optional
Packing mode.
Notes
-----
        *pad* and *sep* need to be given in points and will be scaled
        with the renderer dpi, while *width* and *height* need to be in
        pixels.
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
        update offset of children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
if self.width is not None:
for c in self.get_visible_children():
if isinstance(c, PackerBase) and c.mode == "expand":
c.set_width(self.width)
whd_list = [c.get_extent(renderer)
for c in self.get_visible_children()]
whd_list = [(w, h, xd, (h - yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w, h, xd, yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
sep, self.mode)
yoffsets = yoffsets_ + [yd for w, h, xd, yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
list(zip(xoffsets, yoffsets))
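# A minimal usage sketch for VPacker (illustrative only; assumes an
# existing Axes ``ax``). TextArea and AnchoredOffsetbox are defined later
# in this module:
#
#     box = VPacker(children=[TextArea("first line"), TextArea("second line")],
#                   align="left", pad=2, sep=4)   # pad/sep are in points
#     ax.add_artist(AnchoredOffsetbox(loc=2, child=box, frameon=True))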
class HPacker(PackerBase):
"""
The HPacker has its children packed horizontally. It automatically
adjusts the relative positions of children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
Width and height of the container box, calculated if
`None`.
align : str
Alignment of boxes.
mode : str
Packing mode.
Notes
-----
        *pad* and *sep* need to be given in points and will be scaled
        with the renderer dpi, while *width* and *height* need to be in
        pixels.
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
update offset of children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
whd_list = [c.get_extent(renderer)
for c in self.get_visible_children()]
if not whd_list:
return 2 * pad, 2 * pad, pad, pad, []
if self.height is None:
height_descent = max([h - yd for w, h, xd, yd in whd_list])
ydescent = max([yd for w, h, xd, yd in whd_list])
height = height_descent + ydescent
else:
            height = self.height - 2 * pad  # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
sep, self.mode)
xoffsets = xoffsets_ + [xd for w, h, xd, yd in whd_list]
xdescent = whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
list(zip(xoffsets, yoffsets))
class PaddedBox(OffsetBox):
def __init__(self, child, pad=None, draw_frame=False, patch_attrs=None):
"""
*pad* : boundary pad
.. note::
          *pad* needs to be given in points and will be
          scaled with the renderer dpi, while *width* and *height*
          need to be in pixels.
"""
super(PaddedBox, self).__init__()
self.pad = pad
self._children = [child]
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=1, # self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
if patch_attrs is not None:
self.patch.update(patch_attrs)
self._drawFrame = draw_frame
def get_extent_offsets(self, renderer):
"""
        update offset of children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
w, h, xd, yd = self._children[0].get_extent(renderer)
return w + 2 * pad, h + 2 * pad, \
xd + pad, yd + pad, \
[(0, 0)]
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
self.draw_frame(renderer)
for c in self.get_visible_children():
c.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw_frame(self, renderer):
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox)
if self._drawFrame:
self.patch.draw(renderer)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self.dpi_transform = mtransforms.Affine2D()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.dpi_transform + self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accept : tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of the box
"""
dpi_cor = renderer.points_to_pixels(1.)
return self.width * dpi_cor, self.height * dpi_cor, \
self.xdescent * dpi_cor, self.ydescent * dpi_cor
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
if not a.is_transform_set():
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear()
self.dpi_transform.scale(dpi_cor, dpi_cor)
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
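# A minimal usage sketch for DrawingArea (illustrative only; assumes an
# existing Axes ``ax``). Children are positioned in the DrawingArea's own
# pixel-like coordinates:
#
#     from matplotlib.patches import Circle
#     da = DrawingArea(30, 30, xdescent=0., ydescent=0.)
#     da.add_artist(Circle((15, 15), 10, fc="r", ec="k"))
#     ax.add_artist(AnchoredOffsetbox(loc=4, child=da, frameon=False))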
class TextArea(OffsetBox):
"""
    The TextArea contains a single Text instance. The text is
    placed at (0,0) with baseline+left alignment. The width and height
    of the TextArea instance are the width and height of its child
    text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
Parameters
----------
s : str
a string to be displayed.
        textprops : dict, optional
            Dictionary of keyword arguments to be passed to the Text
            instance contained inside TextArea.
multilinebaseline : bool, optional
If `True`, baseline for multiline text is adjusted so that
            it is (approximately) center-aligned with single-line
text.
minimumdescent : bool, optional
If `True`, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if "va" not in textprops:
textprops["va"] = "baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform +
self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_text(self, s):
"set text"
self._text.set_text(s)
def get_text(self):
"get text"
return self._text.get_text()
def set_multilinebaseline(self, t):
"""
        Set multilinebaseline.
        If True, baseline for multiline text is
        adjusted so that it is (approximately) center-aligned with
        single-line text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
        Set minimumdescent.
        If True, extent of the single-line text is adjusted so that
        it has a minimum descent of "p".
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info, d = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[-1][0] # last line
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline:
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h - d)
if self.get_minimumdescent():
## to have a minimum descent, #i.e., "l" and "p" have same
## descents.
d = max(d, d_)
#else:
# d = d
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AuxTransformBox(OffsetBox):
"""
    Offset Box with the aux_transform. Its children will be
    transformed with the aux_transform first then will be
    offset. The absolute coordinate of the aux_transform is
    meaningless as it will be automatically adjusted so that the
    lower-left corner of the bounding box of children will be set to
    (0, 0) before the offset transform.
    It is similar to DrawingArea, except that the extent of the box
    is not predetermined but calculated from the window extent of its
    children. Furthermore, the extent of the children will be
    calculated in the transformed coordinate.
"""
def __init__(self, aux_transform):
self.aux_transform = aux_transform
OffsetBox.__init__(self)
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
        # ref_offset_transform is used to make the offset_transform
        # always relative to the lower-left corner of the bbox of its
        # children.
self.ref_offset_transform = mtransforms.Affine2D()
self.ref_offset_transform.clear()
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.aux_transform + \
self.ref_offset_transform + \
self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accept : tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
# clear the offset transforms
_off = self.offset_transform.to_values() # to be restored later
self.ref_offset_transform.clear()
self.offset_transform.clear()
# calculate the extent
bboxes = [c.get_window_extent(renderer) for c in self._children]
ub = mtransforms.Bbox.union(bboxes)
        # adjust ref_offset_transform
self.ref_offset_transform.translate(-ub.x0, -ub.y0)
        # restore offset transform
mtx = self.offset_transform.matrix_from_values(*_off)
self.offset_transform.set_matrix(mtx)
return ub.width, ub.height, 0., 0.
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
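# A minimal usage sketch for AuxTransformBox (illustrative only; assumes an
# existing Axes ``ax``). Using ``ax.transData`` as the aux transform lets a
# child patch be specified in data units while still being anchored like an
# ordinary offset box:
#
#     from matplotlib.patches import Ellipse
#     box = AuxTransformBox(ax.transData)
#     box.add_artist(Ellipse((0, 0), width=0.2, height=0.1, angle=30,
#                            fc="none", ec="k"))
#     ax.add_artist(AnchoredOffsetbox(loc=3, child=box, frameon=True))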
class AnchoredOffsetbox(OffsetBox):
"""
An offset box placed according to the legend location
loc. AnchoredOffsetbox has a single child. When multiple children
    are needed, use another OffsetBox class to enclose them. By default,
the offset box is anchored against its parent axes. You may
explicitly specify the bbox_to_anchor.
"""
zorder = 5 # zorder of the legend
def __init__(self, loc,
pad=0.4, borderpad=0.5,
child=None, prop=None, frameon=True,
bbox_to_anchor=None,
bbox_transform=None,
**kwargs):
"""
loc is a string or an integer specifying the legend location.
The valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
pad : pad around the child for drawing a frame. given in
fraction of fontsize.
borderpad : pad between offsetbox frame and the bbox_to_anchor,
child : OffsetBox instance that will be anchored.
prop : font property. This is only used as a reference for paddings.
frameon : draw a frame box if True.
bbox_to_anchor : bbox to anchor. Use self.axes.bbox if None.
bbox_transform : with which the bbox_to_anchor will be transformed.
"""
super(AnchoredOffsetbox, self).__init__(**kwargs)
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
self.set_child(child)
self.loc = loc
self.borderpad = borderpad
self.pad = pad
if prop is None:
self.prop = FontProperties(size=rcParams["legend.fontsize"])
elif isinstance(prop, dict):
self.prop = FontProperties(**prop)
if "size" not in prop:
self.prop.set_size(rcParams["legend.fontsize"])
else:
self.prop = prop
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
self._drawFrame = frameon
def set_child(self, child):
"set the child to be anchored"
self._child = child
def get_child(self):
"return the child"
return self._child
def get_children(self):
"return the list of children"
return [self._child]
def get_extent(self, renderer):
"""
return the extent of the artist. The extent of the child
added with the pad is returned
"""
w, h, xd, yd = self.get_child().get_extent(renderer)
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return w + 2 * pad, h + 2 * pad, xd + pad, yd + pad
def get_bbox_to_anchor(self):
"""
        return the bbox that the legend will be anchored to
"""
if self._bbox_to_anchor is None:
return self.axes.bbox
else:
transform = self._bbox_to_anchor_transform
if transform is None:
return self._bbox_to_anchor
else:
return TransformedBbox(self._bbox_to_anchor,
transform)
def set_bbox_to_anchor(self, bbox, transform=None):
"""
        set the bbox that the child will be anchored to.
*bbox* can be a Bbox instance, a list of [left, bottom, width,
height], or a list of [left, bottom] where the width and
height will be assumed to be zero. The bbox will be
transformed to display coordinate by the given transform.
"""
if bbox is None or isinstance(bbox, BboxBase):
self._bbox_to_anchor = bbox
else:
try:
l = len(bbox)
except TypeError:
raise ValueError("Invalid argument for bbox : %s" % str(bbox))
if l == 2:
bbox = [bbox[0], bbox[1], 0, 0]
self._bbox_to_anchor = Bbox.from_bounds(*bbox)
self._bbox_to_anchor_transform = transform
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
self._update_offset_func(renderer)
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset(w, h, xd, yd, renderer)
return Bbox.from_bounds(ox - xd, oy - yd, w, h)
def _update_offset_func(self, renderer, fontsize=None):
"""
Update the offset func which depends on the dpi of the
renderer (because of the padding).
"""
if fontsize is None:
fontsize = renderer.points_to_pixels(
self.prop.get_size_in_points())
def _offset(w, h, xd, yd, renderer, fontsize=fontsize, self=self):
bbox = Bbox.from_bounds(0, 0, w, h)
borderpad = self.borderpad * fontsize
bbox_to_anchor = self.get_bbox_to_anchor()
x0, y0 = self._get_anchored_bbox(self.loc,
bbox,
bbox_to_anchor,
borderpad)
return x0 + xd, y0 + yd
self.set_offset(_offset)
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw(self, renderer):
"draw the artist"
if not self.get_visible():
return
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
if self._drawFrame:
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox, fontsize)
self.patch.draw(renderer)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
self.get_child().set_offset((px, py))
self.get_child().draw(renderer)
def _get_anchored_bbox(self, loc, bbox, parentbbox, borderpad):
"""
return the position of the bbox anchored at the parentbbox
with the loc code, with the borderpad.
"""
assert loc in range(1, 11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = list(xrange(11))
anchor_coefs = {UR: "NE",
UL: "NW",
LL: "SW",
LR: "SE",
R: "E",
CL: "W",
CR: "E",
LC: "S",
UC: "N",
C: "C"}
c = anchor_coefs[loc]
container = parentbbox.padded(-borderpad)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
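# A minimal usage sketch for AnchoredOffsetbox with an explicit
# bbox_to_anchor (illustrative only; assumes an existing Axes ``ax``). Here
# the lower-left corner of the box (loc=3) is anchored just outside the
# right edge of the axes:
#
#     box = AnchoredOffsetbox(loc=3, child=TextArea("outside the axes"),
#                             bbox_to_anchor=(1.02, 0.5),
#                             bbox_transform=ax.transAxes, borderpad=0.)
#     ax.add_artist(box)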
class AnchoredText(AnchoredOffsetbox):
"""
AnchoredOffsetbox with Text.
"""
def __init__(self, s, loc, pad=0.4, borderpad=0.5, prop=None, **kwargs):
"""
Parameters
----------
s : string
Text.
loc : str
Location code.
pad : float, optional
Pad between the text and the frame as fraction of the font
size.
borderpad : float, optional
Pad between the frame and the axes (or *bbox_to_anchor*).
prop : `matplotlib.font_manager.FontProperties`
Font properties.
Notes
-----
Other keyword parameters of `AnchoredOffsetbox` are also
allowed.
"""
if prop is None:
prop = {}
propkeys = list(six.iterkeys(prop))
badkwargs = ('ha', 'horizontalalignment', 'va', 'verticalalignment')
if set(badkwargs) & set(propkeys):
warnings.warn("Mixing horizontalalignment or verticalalignment "
"with AnchoredText is not supported.")
self.txt = TextArea(s, textprops=prop,
minimumdescent=False)
fp = self.txt._text.get_fontproperties()
super(AnchoredText, self).__init__(loc, pad=pad, borderpad=borderpad,
child=self.txt,
prop=fp,
**kwargs)
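# A minimal usage sketch for AnchoredText (illustrative only; assumes an
# existing Axes ``ax``):
#
#     at = AnchoredText("panel (a)", loc=2, prop=dict(size=10), frameon=True)
#     ax.add_artist(at)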
class OffsetImage(OffsetBox):
def __init__(self, arr,
zoom=1,
cmap=None,
norm=None,
interpolation=None,
origin=None,
filternorm=1,
filterrad=4.0,
resample=False,
dpi_cor=True,
**kwargs
):
self._dpi_cor = dpi_cor
self.image = BboxImage(bbox=self.get_window_extent,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs
)
self._children = [self.image]
self.set_zoom(zoom)
self.set_data(arr)
OffsetBox.__init__(self)
def set_data(self, arr):
self._data = np.asarray(arr)
self.image.set_data(self._data)
def get_data(self):
return self._data
def set_zoom(self, zoom):
self._zoom = zoom
def get_zoom(self):
return self._zoom
# def set_axes(self, axes):
# self.image.set_axes(axes)
# martist.Artist.set_axes(self, axes)
# def set_offset(self, xy):
# """
# set offset of the container.
# Accept : tuple of x,y coordinate in disokay units.
# """
# self._offset = xy
# self.offset_transform.clear()
# self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_children(self):
return [self.image]
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset()
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
# FIXME dpi_cor is never used
if self._dpi_cor: # True, do correction
dpi_cor = renderer.points_to_pixels(1.)
else:
dpi_cor = 1.
zoom = self.get_zoom()
data = self.get_data()
ny, nx = data.shape[:2]
w, h = nx * zoom, ny * zoom
return w, h, 0, 0
def draw(self, renderer):
"""
Draw the children
"""
self.image.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AnnotationBbox(martist.Artist, _AnnotationBase):
"""
Annotation-like class, but with offsetbox instead of Text.
"""
zorder = 3
def __str__(self):
return "AnnotationBbox(%g,%g)" % (self.xy[0], self.xy[1])
@docstring.dedent_interpd
def __init__(self, offsetbox, xy,
xybox=None,
xycoords='data',
boxcoords=None,
frameon=True, pad=0.4, # BboxPatch
annotation_clip=None,
box_alignment=(0.5, 0.5),
bboxprops=None,
arrowprops=None,
fontsize=None,
**kwargs):
"""
*offsetbox* : OffsetBox instance
*xycoords* : same as Annotation but can be a tuple of two
strings which are interpreted as x and y coordinates.
*boxcoords* : similar to textcoords as Annotation but can be a
tuple of two strings which are interpreted as x and y
coordinates.
*box_alignment* : a tuple of two floats for a vertical and
horizontal alignment of the offset box w.r.t. the *boxcoords*.
        The lower-left corner is (0, 0) and the upper-right corner is (1, 1).
other parameters are identical to that of Annotation.
"""
self.offsetbox = offsetbox
self.arrowprops = arrowprops
self.set_fontsize(fontsize)
if xybox is None:
self.xybox = xy
else:
self.xybox = xybox
if boxcoords is None:
self.boxcoords = xycoords
else:
self.boxcoords = boxcoords
if arrowprops is not None:
self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
**self.arrowprops)
else:
self._arrow_relpos = None
self.arrow_patch = None
_AnnotationBase.__init__(self,
xy,
xycoords=xycoords,
annotation_clip=annotation_clip)
martist.Artist.__init__(self, **kwargs)
#self._fw, self._fh = 0., 0. # for alignment
self._box_alignment = box_alignment
# frame
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=pad)
if bboxprops:
self.patch.set(**bboxprops)
self._drawFrame = frameon
@property
def xyann(self):
return self.xybox
@xyann.setter
def xyann(self, xyann):
self.xybox = xyann
@property
def anncoords(self):
return self.boxcoords
@anncoords.setter
def anncoords(self, coords):
self.boxcoords = coords
def contains(self, event):
t, tinfo = self.offsetbox.contains(event)
#if self.arrow_patch is not None:
# a,ainfo=self.arrow_patch.contains(event)
# t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t, tinfo
def get_children(self):
children = [self.offsetbox, self.patch]
if self.arrow_patch:
children.append(self.arrow_patch)
return children
def set_figure(self, fig):
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
self.offsetbox.set_figure(fig)
martist.Artist.set_figure(self, fig)
def set_fontsize(self, s=None):
"""
set fontsize in points
"""
if s is None:
s = rcParams["legend.fontsize"]
self.prop = FontProperties(size=s)
def get_fontsize(self, s=None):
"""
return fontsize in points
"""
return self.prop.get_size_in_points()
def update_positions(self, renderer):
"""
Update the pixel positions of the annotated point and the text.
"""
xy_pixel = self._get_position_xy(renderer)
self._update_position_xybox(renderer, xy_pixel)
mutation_scale = renderer.points_to_pixels(self.get_fontsize())
self.patch.set_mutation_scale(mutation_scale)
if self.arrow_patch:
self.arrow_patch.set_mutation_scale(mutation_scale)
def _update_position_xybox(self, renderer, xy_pixel):
"""
Update the pixel positions of the annotation text and the arrow
patch.
"""
x, y = self.xybox
if isinstance(self.boxcoords, tuple):
xcoord, ycoord = self.boxcoords
x1, y1 = self._get_xy(renderer, x, y, xcoord)
x2, y2 = self._get_xy(renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = self._get_xy(renderer, x, y, self.boxcoords)
w, h, xd, yd = self.offsetbox.get_extent(renderer)
_fw, _fh = self._box_alignment
self.offsetbox.set_offset((ox0 - _fw * w + xd, oy0 - _fh * h + yd))
# update patch position
bbox = self.offsetbox.get_window_extent(renderer)
#self.offsetbox.set_offset((ox0-_fw*w, oy0-_fh*h))
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
x, y = xy_pixel
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted.
relpos = self._arrow_relpos
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
            # Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
fs = self.prop.get_size_in_points()
mutation_scale = d.pop("mutation_scale", fs)
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
patchA = d.pop("patchA", self.patch)
self.arrow_patch.set_patchA(patchA)
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
xy_pixel = self._get_position_xy(renderer)
if not self._check_xy(renderer, xy_pixel):
return
self.update_positions(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
if self._drawFrame:
self.patch.draw(renderer)
self.offsetbox.draw(renderer)
class DraggableBase(object):
"""
    helper code for a draggable artist (legend, offsetbox)
    The derived class must override the following two methods.
    def save_offset(self):
        pass
    def update_offset(self, dx, dy):
        pass
    *save_offset* is called when the object is picked for dragging and it is
    meant to save the reference position of the artist.
    *update_offset* is called during the dragging. dx and dy are the pixel
    offsets from the point where the mouse drag started.
    Optionally you may override the following two methods.
    def artist_picker(self, artist, evt):
        return self.ref_artist.contains(evt)
    def finalize_offset(self):
        pass
    *artist_picker* is a picker method that will be
    used. *finalize_offset* is called when the mouse is released. In
    the current implementation of DraggableLegend and DraggableAnnotation,
    *update_offset* places the artists simply in display
    coordinates. And *finalize_offset* recalculates their position in
    the normalized axes coordinate and sets a relevant attribute.
"""
def __init__(self, ref_artist, use_blit=False):
self.ref_artist = ref_artist
self.got_artist = False
self.canvas = self.ref_artist.figure.canvas
self._use_blit = use_blit and self.canvas.supports_blit
c2 = self.canvas.mpl_connect('pick_event', self.on_pick)
c3 = self.canvas.mpl_connect('button_release_event', self.on_release)
ref_artist.set_picker(self.artist_picker)
self.cids = [c2, c3]
def on_motion(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.draw()
def on_motion_blit(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.restore_region(self.background)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
def on_pick(self, evt):
if evt.artist == self.ref_artist:
self.mouse_x = evt.mouseevent.x
self.mouse_y = evt.mouseevent.y
self.got_artist = True
if self._use_blit:
self.ref_artist.set_animated(True)
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(
self.ref_artist.figure.bbox)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
self._c1 = self.canvas.mpl_connect('motion_notify_event',
self.on_motion_blit)
else:
self._c1 = self.canvas.mpl_connect('motion_notify_event',
self.on_motion)
self.save_offset()
def on_release(self, event):
if self.got_artist:
self.finalize_offset()
self.got_artist = False
self.canvas.mpl_disconnect(self._c1)
if self._use_blit:
self.ref_artist.set_animated(False)
def disconnect(self):
"""disconnect the callbacks"""
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def artist_picker(self, artist, evt):
return self.ref_artist.contains(evt)
def save_offset(self):
pass
def update_offset(self, dx, dy):
pass
def finalize_offset(self):
pass
class DraggableOffsetBox(DraggableBase):
def __init__(self, ref_artist, offsetbox, use_blit=False):
DraggableBase.__init__(self, ref_artist, use_blit=use_blit)
self.offsetbox = offsetbox
def save_offset(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
offset = offsetbox.get_offset(w, h, xd, yd, renderer)
self.offsetbox_x, self.offsetbox_y = offset
self.offsetbox.set_offset(offset)
def update_offset(self, dx, dy):
loc_in_canvas = self.offsetbox_x + dx, self.offsetbox_y + dy
self.offsetbox.set_offset(loc_in_canvas)
def get_loc_in_canvas(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
ox, oy = offsetbox._offset
loc_in_canvas = (ox - xd, oy - yd)
return loc_in_canvas
class DraggableAnnotation(DraggableBase):
def __init__(self, annotation, use_blit=False):
DraggableBase.__init__(self, annotation, use_blit=use_blit)
self.annotation = annotation
def save_offset(self):
ann = self.annotation
x, y = ann.xyann
if isinstance(ann.anncoords, tuple):
xcoord, ycoord = ann.anncoords
x1, y1 = ann._get_xy(self.canvas.renderer, x, y, xcoord)
x2, y2 = ann._get_xy(self.canvas.renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = ann._get_xy(self.canvas.renderer, x, y, ann.anncoords)
self.ox, self.oy = ox0, oy0
self.annotation.anncoords = "figure pixels"
self.update_offset(0, 0)
def update_offset(self, dx, dy):
ann = self.annotation
ann.xyann = self.ox + dx, self.oy + dy
x, y = ann.xyann
def finalize_offset(self):
loc_in_canvas = self.annotation.xyann
self.annotation.anncoords = "axes fraction"
pos_axes_fraction = self.annotation.axes.transAxes.inverted()
pos_axes_fraction = pos_axes_fraction.transform_point(loc_in_canvas)
self.annotation.xyann = tuple(pos_axes_fraction)
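# A minimal usage sketch for DraggableAnnotation (illustrative only; assumes
# an existing Axes ``ax`` and an interactive backend). Keep a reference to
# the draggable helper so it is not garbage-collected:
#
#     ann = ax.annotate("drag me", xy=(0.5, 0.5), xytext=(30, 30),
#                       textcoords="offset points",
#                       arrowprops=dict(arrowstyle="->"))
#     drag = DraggableAnnotation(ann)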
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = plt.subplot(121)
#txt = ax.text(0.5, 0.5, "Test", size=30, ha="center", color="w")
kwargs = dict()
a = np.arange(256).reshape(16, 16) / 256.
myimage = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ax.add_artist(myimage)
myimage.set_offset((100, 100))
myimage2 = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ann = AnnotationBbox(myimage2, (0.5, 0.5),
xybox=(30, 30),
xycoords='data',
boxcoords="offset points",
frameon=True, pad=0.4, # BboxPatch
bboxprops=dict(boxstyle="round", fc="y"),
fontsize=None,
arrowprops=dict(arrowstyle="->"),
)
ax.add_artist(ann)
plt.draw()
plt.show()
| mit |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Interface_Mesh_Types/Interface_2/HardContact_NonLinHardShear/Interface_Test_Normal_Plot.py | 30 | 2779 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Interface_Surface_Adding_axial_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig(outfigname, bbox_inches='tight')
# plt.show() | cc0-1.0 |
walterst/qiime | scripts/identify_paired_differences.py | 15 | 9191 | #!/usr/bin/env python
# File created on 19 Jun 2013
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2013, The QIIME project"
__credits__ = ["Greg Caporaso", "Jose Carlos Clemente Litran"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from biom import load_table
from qiime.group import (
extract_per_individual_state_metadata_from_sample_metadata,
extract_per_individual_state_metadata_from_sample_metadata_and_biom)
from qiime.parse import parse_mapping_file_to_dict
from qiime.util import (parse_command_line_parameters,
make_option)
from qiime.filter import sample_ids_from_metadata_description
from qiime.stats import paired_difference_analyses
script_info = {}
script_info[
'brief_description'] = "Generate plots and stats to test for change in some data point(s) with a state change on a per-individual basis."
script_info[
'script_description'] = "This script provides a framework for paired-difference testing (i.e., analysis of data generated under a pre/post experimental design). In a pre/post experimental design, individuals are sampled before and after some 'treatment'. This code plots differences in values in the sample metadata (i.e., the mapping file) or observation counts in a BIOM table, and runs a (Bonferroni-corrected) one sample t-test on each sample metadata category or BIOM observation to determine if the mean of each distribution of pre/post differences differs from zero. If 'None' appears for the t score and p-values, this often means that the distribution of differences contained no variance, so the t-test could not be run. This can happen, for example, if the value passed for --valid_states is so restrictive that only a single sample is retained for analysis."
script_info['script_usage'] = []
script_info['script_usage'].append(
("Generate plots and stats for one category from the mapping file where the y-axis should be consistent across plots and the lines in the plots should be light blue.",
"",
"%prog -m map.txt --metadata_categories 'Streptococcus Abundance' --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o taxa_results --ymin 0 --ymax 60 --line_color '#eeefff'"))
script_info['script_usage'].append(
("Generate plots and stats for three categories from the mapping file.",
"",
"%prog -m map.txt --metadata_categories 'Streptococcus Abundance,Phylogenetic Diversity,Observed OTUs' --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o taxa_and_alpha_results"))
script_info['script_usage'].append(
("Generate plots for all observations in a biom file",
"",
"%prog -m map.txt -b otu_table.biom --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o otu_results"))
script_info['script_usage'].append(
("Generate plots for all observations in a biom file, but only including samples from individuals whose 'TreatmentResponse' was 'Improved' (as defined in the mapping file).",
"",
"%prog -m map.txt -b otu_table.biom --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o otu_results_improved_only --valid_states TreatmentResponse:Improved"))
script_info[
'output_description'] = "The output of this script is plots of pre/post differences and associated statistics."
script_info['required_options'] = [
make_option(
'-m',
'--mapping_fp',
type="existing_filepath",
help='the input metadata map filepath'),
make_option(
'-o',
'--output_dir',
type="new_filepath",
help='directory where output files should be saved'),
make_option(
'-t',
'--state_category',
help='the mapping file column name to plot change over (usually has values like "pre-treatment" and "post-treatment")'),
make_option(
'-x',
'--state_values',
help='ordered list of state values to test change over (defines direction of graphs, generally something like "pre-treatment,post-treatment"). currently limited to two states.'),
make_option(
'-c',
'--individual_id_category',
help='the mapping file column name containing each individual\'s identifier (usually something like "personal_identifier")'),
]
script_info['optional_options'] = [
make_option(
'--ymin',
default=None,
type='float',
help='set the minimum y-value across plots [default: determined on a per-plot basis]'),
make_option(
'--ymax',
default=None,
type='float',
help='set the maximum y-value across plots [default: determined on a per-plot basis]'),
make_option(
'--metadata_categories',
help='ordered list of the mapping file column names to test for paired differences (usually something like "StreptococcusAbundance,Phylogenetic Diversity") [default: %default]',
default=None),
make_option(
'--observation_ids',
help='ordered list of the observation ids to test for paired differences if a biom table is provided (usually something like "otu1,otu2") [default: compute paired differences for all observation ids]',
default=None),
make_option(
'-b',
'--biom_table_fp',
help='path to biom table to use for computing paired differences [default: %default]',
type='existing_filepath',
default=None),
make_option(
'-s',
'--valid_states',
help="string describing samples that should be included based on their metadata (e.g. 'TreatmentResponse:Improved') [default: all samples are included in analysis]",
default=None),
make_option(
'--line_color',
help="color of lines in plots, useful if generating multiple plots in different runs of this script to overlay on top of one another. these can be specified as matplotlib color names, or as html hex strings [default: %default]",
default="black"),
]
script_info['version'] = __version__
def main():
option_parser, opts, args =\
parse_command_line_parameters(**script_info)
mapping_fp = opts.mapping_fp
state_values = opts.state_values.split(',')
metadata_categories = opts.metadata_categories
state_category = opts.state_category
individual_id_category = opts.individual_id_category
output_dir = opts.output_dir
biom_table_fp = opts.biom_table_fp
observation_ids = opts.observation_ids
    if observation_ids is not None:
observation_ids = observation_ids.split(',')
valid_states = opts.valid_states
ymin = opts.ymin
ymax = opts.ymax
line_color = opts.line_color
# validate the input - currently only supports either biom data
# or mapping file data. if useful in the future it shouldn't be too
# hard to allow the user to provide both.
if metadata_categories and biom_table_fp:
option_parser.error(
"Can only pass --metadata_categories or --biom_table_fp, not both.")
elif not (metadata_categories or biom_table_fp):
option_parser.error(
"Must pass either --metadata_categories or --biom_table_fp.")
else:
pass
# parse the mapping file to a dict
mapping_data = parse_mapping_file_to_dict(open(mapping_fp, 'U'))[0]
# currently only support for pre/post (ie, two-state) tests
if len(state_values) != 2:
option_parser.error(
"Exactly two state_values must be passed separated by a comma.")
# filter mapping_data, if requested
if valid_states:
sample_ids_to_keep = sample_ids_from_metadata_description(
open(mapping_fp, 'U'), valid_states)
for sid in mapping_data.keys():
if sid not in sample_ids_to_keep:
del mapping_data[sid]
if biom_table_fp:
biom_table = load_table(biom_table_fp)
analysis_categories = observation_ids or biom_table.ids(axis='observation')
personal_ids_to_state_values = \
extract_per_individual_state_metadata_from_sample_metadata_and_biom(
mapping_data,
biom_table,
state_category,
state_values,
individual_id_category,
observation_ids=analysis_categories)
else:
analysis_categories = metadata_categories.split(',')
personal_ids_to_state_values = \
extract_per_individual_state_metadata_from_sample_metadata(
mapping_data,
state_category,
state_values,
individual_id_category,
analysis_categories)
paired_difference_analyses(personal_ids_to_state_values,
analysis_categories,
state_values,
output_dir,
line_color=line_color,
ymin=ymin,
ymax=ymax)
if __name__ == "__main__":
main()
| gpl-2.0 |