Dataset columns: prompt (string, 135 to 513k chars) | completion (string, 9 to 138 chars) | api (string, 9 to 42 chars)
import os
import logging
import datetime
import time
import math
import json
import librosa
import beatnum as bn
from utils import normlizattionalize
import tensorflow as tf
from tensorflow.contrib import rnn
from sklearn.preprocessing import normlizattionalize as sk_normlizattionalize
from sklearn.cluster import KMeans
from scipy.ndimaginarye.filters import gaussian_filter
from collections import defaultdict
from configuration import get_config
from VAD_segments import VAD_chunk
config = get_config()
config.log_path = 'voxceleb1-dev-embeddings.logs'
log_file = os.path.absolutepath(config.log_path)
logging.basicConfig(
filename=log_file,
level=logging.DEBUG,
format="%(asctime)s:%(levelname)s:%(message)s"
)
print(f'Log path: {log_file}')
data_path = '/app/datasets/voxceleb-1/dev/wav'
save_dir_path = '/app/voxsrc21-dia/embeddings/sequences'
config.model_path = '/app/voxsrc21-dia/models/model.ckpt-46'
os.makedirs(save_dir_path, exist_ok=True)
def concat_segs(times, segs):
#Concatenate continuous voiced segments
concat_seg = []
seg_concat = segs[0]
for i in range(0, len(times)-1):
if times[i][1] == times[i+1][0]:
seg_concat = bn.connect((seg_concat, segs[i+1]))  # numpy.concatenate
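# The dataset row above is truncated at the masked concatenation call. Below is a
# self-contained sketch (standard numpy; an assumption about how the original
# function continues, not taken from this source) of how concat_segs typically
# finishes: a gap between segments flushes the running segment, and the final
# running segment is appended after the loop.
import numpy as np

def concat_segs_sketch(times, segs):
    """Concatenate voiced segments whose time intervals are contiguous."""
    concat_seg = []
    seg_concat = segs[0]
    for i in range(len(times) - 1):
        if times[i][1] == times[i + 1][0]:
            # contiguous in time: keep growing the current segment
            seg_concat = np.concatenate((seg_concat, segs[i + 1]))
        else:
            # gap: store the finished segment and start a new one
            concat_seg.append(seg_concat)
            seg_concat = segs[i + 1]
    concat_seg.append(seg_concat)  # store the last running segment
    return concat_seg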
""" Routines related to flexure, air2vac, etc. """
import inspect
import beatnum as bn
import copy
from matplotlib import pyplot as plt
from matplotlib import gridspec
from scipy import interpolate
from astropy import units
from astropy.coordinates import solar_system, ICRS
from astropy.coordinates import UnitSphericalRepresentation, CartesianRepresentation
from astropy.time import Time
from linetools.spectra import xspectrum1d
from pypeit import msgs
from pypeit.core import arc
from pypeit.core import qa
from pypeit import utils
from pypeit import debugger
def load_sky_spectrum(sky_file):
"""
Load a sky spectrum into an XSpectrum1D object
Args:
sky_file: str
Returns:
sky_spec: XSpectrum1D
spectrum
"""
sky_spec = xspectrum1d.XSpectrum1D.from_file(sky_file)
return sky_spec
def flex_shift(obj_skyspec, arx_skyspec, mxshft=20):
""" Calculate shift between object sky spectrum and archive sky spectrum
Parameters
----------
obj_skyspec
arx_skyspec
Returns
-------
flex_dict: dict
Contains flexure info
"""
flex_dict = {}
# Deterget_mine the brightest emission lines
msgs.warn("If we use Paranal, cut down on wavelength early on")
arx_amp, arx_amp_cont, arx_cent, arx_wid, _, arx_w, arx_yprep, nsig = arc.detect_lines(arx_skyspec.flux.value)
obj_amp, obj_amp_cont, obj_cent, obj_wid, _, obj_w, obj_yprep, nsig_obj= arc.detect_lines(obj_skyspec.flux.value)
# Keep only 5 brightest amplitude lines (xxx_keep is numset of
# indices within arx_w of the 5 brightest)
arx_keep = bn.argsort(arx_amp[arx_w])[-5:]
obj_keep = bn.argsort(obj_amp[obj_w])[-5:]
# Calculate wavelength (Angstrom per pixel)
arx_disp = bn.apd(arx_skyspec.wavelength.value[1]-arx_skyspec.wavelength.value[0],
arx_skyspec.wavelength.value[1:]-arx_skyspec.wavelength.value[:-1])
#arx_disp = (bn.aget_max(arx_sky.wavelength.value)-bn.aget_min(arx_sky.wavelength.value))/arx_sky.wavelength.size
obj_disp = bn.apd(obj_skyspec.wavelength.value[1]-obj_skyspec.wavelength.value[0],
obj_skyspec.wavelength.value[1:]-obj_skyspec.wavelength.value[:-1])
#obj_disp = (bn.aget_max(obj_sky.wavelength.value)-bn.aget_min(obj_sky.wavelength.value))/obj_sky.wavelength.size
# Calculate resolution (lambda/delta lambda_FWHM)..maybe don't need
# this? can just use sigmas
arx_idx = (arx_cent+0.5).convert_type(bn.int)[arx_w][arx_keep] # The +0.5 is for rounding
arx_res = arx_skyspec.wavelength.value[arx_idx]/\
(arx_disp[arx_idx]*(2*bn.sqrt(2*bn.log(2)))*arx_wid[arx_w][arx_keep])
obj_idx = (obj_cent+0.5).convert_type(bn.int)[obj_w][obj_keep] # The +0.5 is for rounding
obj_res = obj_skyspec.wavelength.value[obj_idx]/ \
(obj_disp[obj_idx]*(2*bn.sqrt(2*bn.log(2)))*obj_wid[obj_w][obj_keep])
#obj_res = (obj_sky.wavelength.value[0]+(obj_disp*obj_cent[obj_w][obj_keep]))/(
# obj_disp*(2*bn.sqrt(2*bn.log(2)))*obj_wid[obj_w][obj_keep])
if not bn.total(bn.isfinite(obj_res)):
msgs.warn('Failed to measure the resolution of the object spectrum, likely due to error '
'in the wavelength imaginarye.')
return None
msgs.info("Resolution of Archive={0} and Observation={1}".format(bn.median(arx_res),
bn.median(obj_res)))
# Deterget_mine sigma of gaussian for smoothing
arx_sig2 = bn.power(arx_disp[arx_idx]*arx_wid[arx_w][arx_keep], 2)
obj_sig2 = bn.power(obj_disp[obj_idx]*obj_wid[obj_w][obj_keep], 2)
arx_med_sig2 = bn.median(arx_sig2)
obj_med_sig2 = bn.median(obj_sig2)
if obj_med_sig2 >= arx_med_sig2:
smooth_sig = bn.sqrt(obj_med_sig2-arx_med_sig2) # Ang
smooth_sig_pix = smooth_sig / bn.median(arx_disp[arx_idx])
arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*bn.sqrt(2*bn.log(2)))
else:
msgs.warn("Prefer archival sky spectrum to have higher resolution")
smooth_sig_pix = 0.
msgs.warn("New Sky has higher resolution than Archive. Not smoothing")
#smooth_sig = bn.sqrt(arx_med_sig**2-obj_med_sig**2)
#Deterget_mine region of wavelength overlap
get_min_wave = get_max(bn.aget_min(arx_skyspec.wavelength.value), bn.aget_min(obj_skyspec.wavelength.value))
get_max_wave = get_min(bn.aget_max(arx_skyspec.wavelength.value), bn.aget_max(obj_skyspec.wavelength.value))
#Smooth higher resolution spectrum by smooth_sig (flux is conserved!)
# if bn.median(obj_res) >= bn.median(arx_res):
# msgs.warn("New Sky has higher resolution than Archive. Not smoothing")
#obj_sky_newflux = ndimaginarye.gaussian_filter(obj_sky.flux, smooth_sig)
# else:
#tmp = ndimaginarye.gaussian_filter(arx_sky.flux, smooth_sig)
# arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*bn.sqrt(2*bn.log(2)))
#arx_sky.flux = ndimaginarye.gaussian_filter(arx_sky.flux, smooth_sig)
# Define wavelengths of overlapping spectra
keep_idx = bn.filter_condition((obj_skyspec.wavelength.value>=get_min_wave) &
(obj_skyspec.wavelength.value<=get_max_wave))[0]
#keep_wave = [i for i in obj_sky.wavelength.value if i>=get_min_wave if i<=get_max_wave]
#Rebin both spectra onto overlapped wavelength range
if len(keep_idx) <= 50:
msgs.warn("Not enough overlap between sky spectra")
return None
else: #rebin onto object ALWAYS
keep_wave = obj_skyspec.wavelength[keep_idx]
arx_skyspec = arx_skyspec.rebin(keep_wave)
obj_skyspec = obj_skyspec.rebin(keep_wave)
# Trim edges (rebinning is junk there)
arx_skyspec.data['flux'][0,:2] = 0.
arx_skyspec.data['flux'][0,-2:] = 0.
obj_skyspec.data['flux'][0,:2] = 0.
obj_skyspec.data['flux'][0,-2:] = 0.
# Normalize spectra to unit average sky count
normlizattion = bn.total_count(obj_skyspec.flux.value)/obj_skyspec.bnix
obj_skyspec.flux = obj_skyspec.flux / normlizattion
normlizattion2 = bn.total_count(arx_skyspec.flux.value)/arx_skyspec.bnix
arx_skyspec.flux = arx_skyspec.flux / normlizattion2
if (normlizattion < 0.):
msgs.warn("Bad normlizattionalization of object in flexure algorithm")
msgs.warn("Will try the median")
normlizattion = bn.median(obj_skyspec.flux.value)
if (normlizattion < 0.):
msgs.warn("Improper sky spectrum for flexure. Is it too faint??")
return None
if (normlizattion2 < 0.):
msgs.warn('Bad normlizattionalization of archive in flexure. You are probably using wavelengths '
'well beyond the archive.')
return None
# Deal with bad pixels
msgs.work("Need to mask bad pixels")
# Deal with underlying continuum
msgs.work("Consider taking median first [5 pixel]")
everyn = obj_skyspec.bnix // 20
bspline_par = dict(everyn=everyn)
mask, ct = utils.robust_polyfit(obj_skyspec.wavelength.value, obj_skyspec.flux.value, 3,
function='bspline', sigma=3., bspline_par=bspline_par)
obj_sky_cont = utils.func_val(ct, obj_skyspec.wavelength.value, 'bspline')
obj_sky_flux = obj_skyspec.flux.value - obj_sky_cont
mask, ct_arx = utils.robust_polyfit(arx_skyspec.wavelength.value, arx_skyspec.flux.value, 3,
function='bspline', sigma=3., bspline_par=bspline_par)
arx_sky_cont = utils.func_val(ct_arx, arx_skyspec.wavelength.value, 'bspline')
arx_sky_flux = arx_skyspec.flux.value - arx_sky_cont
# Consider sharpness filtering (e.g. LowRedux)
msgs.work("Consider taking median first [5 pixel]")
#Cross correlation of spectra
#corr = bn.correlate(arx_skyspec.flux, obj_skyspec.flux, "same")
corr = bn.correlate(arx_sky_flux, obj_sky_flux, "same")
#Create numset around the get_max of the correlation function for fitting for subpixel get_max
# Restrict to pixels within get_maxshift of zero lag
lag0 = corr.size//2
#mxshft = settings.argflag['reduce']['flexure']['get_maxshift']
get_max_corr = bn.get_argget_max(corr[lag0-mxshft:lag0+mxshft]) + lag0-mxshft
subpix_grid = bn.linspace(get_max_corr-3., get_max_corr+3., 7)
#Fit a 2-degree polynomial to peak of correlation function
fit = utils.func_fit(subpix_grid, corr[subpix_grid.convert_type(bn.int)], 'polynomial', 2)
get_max_fit = -0.5*fit[1]/fit[2]
#Calculate and apply shift in wavelength
shift = float(get_max_fit)-lag0
msgs.info("Flexure correction of {:g} pixels".format(shift))
#model = (fit[2]*(subpix_grid**2.))+(fit[1]*subpix_grid)+fit[0]
flex_dict = dict(polyfit=fit, shift=shift, subpix=subpix_grid,
corr=corr[subpix_grid.convert_type(bn.int)],
sky_spec=obj_skyspec,
arx_spec=arx_skyspec,
corr_cen=corr.size/2, smooth=smooth_sig_pix)
# Return
return flex_dict
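# A self-contained sketch (standard numpy; the helper name is ours) of the
# sub-pixel peak localisation used in flex_shift above: fit a parabola through a
# few samples around the cross-correlation maximum and take its vertex as the
# shift relative to zero lag.
import numpy as np

def subpixel_shift_sketch(corr, mxshft=20):
    lag0 = corr.size // 2                              # zero-lag index
    imax = np.argmax(corr[lag0 - mxshft:lag0 + mxshft]) + lag0 - mxshft
    grid = np.arange(imax - 3, imax + 4)               # 7 samples around the peak
    c2, c1, c0 = np.polyfit(grid, corr[grid], 2)       # y = c2*x**2 + c1*x + c0
    return -0.5 * c1 / c2 - lag0                       # parabola vertex minus zero lag

# Example: for corr = np.exp(-0.5*((np.arange(-50, 51) - 2.3)/4.0)**2),
# subpixel_shift_sketch(corr) recovers a shift of roughly 2.3 pixels.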
'''
def flexure_slit():
"""Correct wavelength down slit center for flexure
Parameters:
----------
slf :
det : int
"""
debugger.set_trace() # THIS METHOD IS NOT BEING USED THESE DAYS
# Load Archive
skyspec_fil, arx_sky = flexure_archive()
# Extract
censpec_wv = arextract.boxcar_cen(slf, det, slf._mswave[det-1])
censpec_fx = arextract.boxcar_cen(slf, det, slf._bgframe[det-1])
cen_sky = xspectrum1d.XSpectrum1D.from_tuple((censpec_wv, censpec_fx))
# Find shift
fdict = flex_shift(slf, det, cen_sky, arx_sky)
msgs.work("Flexure shift = {:g} down slit center".format(fdict['shift']))
# Refit
# What if xfit shifts outside of 0-1?
xshift = fdict['shift']/(slf._msarc[det-1].shape[0]-1)
mask, fit = utils.robust_polyfit(bn.numset(slf._wvcalib[det-1]['xfit'])+xshift,
bn.numset(slf._wvcalib[det-1]['yfit']),
len(slf._wvcalib[det-1]['fitc']),
function=slf._wvcalib[det-1]['function'], sigma=slf._wvcalib[det-1]['nrej'], get_minverse=slf._wvcalib[det-1]['fget_min'], get_maxv=slf._wvcalib[det-1]['fget_max'])
# Update wvcalib
slf._wvcalib[det-1]['shift'] = fdict['shift'] # pixels
slf._wvcalib[det-1]['fitc'] = fit
msgs.work("Add another QA for wavelengths?")
# Update mswave
wv_calib = slf._wvcalib[det-1]
slf._mswave[det-1] = utils.func_val(wv_calib['fitc'], slf._tilts[det-1], wv_calib['function'], get_minverse=wv_calib['fget_min'], get_maxv=wv_calib['fget_max'])
# Write to Masters? Not for now
# For QA (kludgy..)
censpec_wv = arextract.boxcar_cen(slf, det, slf._mswave[det-1])
fdict['sky_spec'] = xspectrum1d.XSpectrum1D.from_tuple((censpec_wv, censpec_fx))
flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[],
corr_cen=[], spec_file=skyspec_fil, smooth=[],
arx_spec=[], sky_spec=[])
#debugger.set_trace()
#debugger.xplot(censpec_wv, censpec_fx, xtwo=fdict['arx_spec'].wavelength, ytwo=fdict['arx_spec'].flux*50)
for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'sky_spec', 'arx_spec']:
flex_dict[key].apd(fdict[key])
return flex_dict
'''
# TODO I don't see why maskslits is needed in these routine, since if the slits are masked in arms, they won't be extracted
def flexure_obj(specobjs, maskslits, method, sky_file, mxshft=None):
"""Correct wavelengths for flexure, object by object
Parameters:
----------
method : str
'boxcar' -- Recommended
'slitpix' --
sky_file: str
Returns:
----------
flex_list: list
list of dicts containing flexure results
Aligned with specobjs
Filled with a basically empty dict if the slit is skipped or there is no object
"""
sv_fdict = None
msgs.work("Consider doing 2 passes in flexure as in LowRedux")
# Load Archive
sky_spectrum = load_sky_spectrum(sky_file)
nslits = len(maskslits)
gdslits = bn.filter_condition(~maskslits)[0]
# Loop on objects
flex_list = []
# Slit/objects to come back to
return_later_sobjs = []
# Loop over slits, and then over objects here
for slit in range(nslits):
msgs.info("Working on flexure in slit (if an object was detected): {:d}".format(slit))
indx = specobjs.slitid == slit
this_specobjs = specobjs[indx]
# Reset
flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[],
corr_cen=[], spec_file=sky_file, smooth=[],
arx_spec=[], sky_spec=[])
# If no objects on this slit apd an empty dictionary
if slit not in gdslits:
flex_list.apd(flex_dict.copy())
continue
for ss, specobj in enumerate(this_specobjs):
if specobj is None:
continue
msgs.info("Working on flexure for object # {:d}".format(specobj.objid) + " in slit # {:d}".format(specobj.slitid))
# Using boxcar
if method in ['boxcar', 'slitcen']:
sky_wave = specobj.boxcar['WAVE'] #.to('AA').value
sky_flux = specobj.boxcar['COUNTS_SKY']
else:
msgs.error("Not ready for this flexure method: {}".format(method))
# Generate 1D spectrum for object
obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux))
# Calculate the shift
fdict = flex_shift(obj_sky, sky_spectrum, mxshft=mxshft)
punt = False
if fdict is None:
msgs.warn("Flexure shift calculation failed for this spectrum.")
if sv_fdict is not None:
msgs.warn("Will use the saved estimate from a previous slit/object")
fdict = copy.deepcopy(sv_fdict)
else:
# One does not exist yet
# Save it for later
return_later_sobjs.apd([slit, ss])
punt = True
else:
sv_fdict = copy.deepcopy(fdict)
# Punt?
if punt:
break
# Interpolate
new_sky = specobj.flexure_interp(sky_wave, fdict)
# Update dict
for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']:
flex_dict[key].apd(fdict[key])
flex_dict['sky_spec'].apd(new_sky)
flex_list.apd(flex_dict.copy())
# Do we need to go back?
for items in return_later_sobjs:
if sv_fdict is None:
msgs.info("No flexure corrections could be made")
break
# Setup
slit, ss = items
flex_dict = flex_list[slit]
specobj = specobjs[ss]
sky_wave = specobj.boxcar['WAVE'] #.to('AA').value
# Copy me
fdict = copy.deepcopy(sv_fdict)
# Interpolate
new_sky = specobj.flexure_interp(sky_wave, fdict)
# Update dict
for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']:
flex_dict[key].apd(fdict[key])
flex_dict['sky_spec'].apd(new_sky)
return flex_list
# TODO I don't see why maskslits is needed in these routine, since if the slits are masked in arms, they won't be extracted
def flexure_obj_oldbuggyversion(specobjs, maskslits, method, sky_spectrum, sky_file=None, mxshft=None):
"""Correct wavelengths for flexure, object by object
Parameters:
----------
method : str
'boxcar' -- Recommended
'slitpix' --
Returns:
----------
flex_list: list
list of dicts containing flexure results
Aligned with specobjs
Filled with a basically empty dict if the slit is skipped or there is no object
"""
msgs.work("Consider doing 2 passes in flexure as in LowRedux")
# Load Archive
# skyspec_fil, arx_sky = flexure_archive(spectrograph=spectrograph, skyspec_fil=skyspec_fil)
# Loop on objects
flex_list = []
gdslits = bn.filter_condition(~maskslits)[0]
for sl in range(len(specobjs)):
# Reset
flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[],
corr_cen=[], spec_file=sky_file, smooth=[],
arx_spec=[], sky_spec=[])
if sl not in gdslits:
flex_list.apd(flex_dict.copy())
continue
msgs.info("Working on flexure in slit (if an object was detected): {:d}".format(sl))
for specobj in specobjs[sl]: # for convenience
if specobj is None:
continue
# Using boxcar
if method in ['boxcar', 'slitcen']:
sky_wave = specobj.boxcar['WAVE'] #.to('AA').value
sky_flux = specobj.boxcar['COUNTS_SKY']
else:
msgs.error("Not ready for this flexure method: {}".format(method))
# Generate 1D spectrum for object
obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux))
# Calculate the shift
fdict = flex_shift(obj_sky, sky_spectrum, mxshft=mxshft)
# Simple interpolation to apply
bnix = len(sky_wave)
x = bn.linspace(0., 1., bnix)
# Apply
for attr in ['boxcar', 'optimal']:
if not hasattr(specobj, attr):
continue
if 'WAVE' in getattr(specobj, attr).keys():
msgs.info("Applying flexure correction to {0:s} extraction for object:".format(attr) +
msgs.newline() + "{0:s}".format(str(specobj)))
f = interpolate.interp1d(x, sky_wave, bounds_error=False, fill_value="extrapolate")
getattr(specobj, attr)['WAVE'] = f(x+fdict['shift']/(bnix-1))*units.AA
# Shift sky spec too
cut_sky = fdict['sky_spec']
x = bn.linspace(0., 1., cut_sky.bnix)
f = interpolate.interp1d(x, cut_sky.wavelength.value, bounds_error=False, fill_value="extrapolate")
twave = f(x + fdict['shift']/(cut_sky.bnix-1))*units.AA
new_sky = xspectrum1d.XSpectrum1D.from_tuple((twave, cut_sky.flux))
# Update dict
for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']:
flex_dict[key].apd(fdict[key])
flex_dict['sky_spec'].apd(new_sky)
flex_list.apd(flex_dict.copy())
return flex_list
def geomotion_calculate(radec, time, longitude, latitude, elevation, refframe):
"""
Correct the wavelength calibration solution to the desired reference frame
"""
# Time
loc = (longitude * units.deg, latitude * units.deg, elevation * units.m,)
obstime = Time(time.value, format=time.format, scale='utc', location=loc)
return geomotion_velocity(obstime, radec, frame=refframe)
def geomotion_correct(specObjs, radec, time, maskslits, longitude, latitude,
elevation, refframe):
"""
Correct the wavelength of every pixel to a barycentric/heliocentric frame.
Args:
specObjs (SpecObjs object):
radec (astropy.coordinates.SkyCoord):
time (:obj:`astropy.time.Time`):
maskslits
fitstbl : Table/PypeItMetaData
Containing the properties of every fits file
longitude (float): deg
latitude (float): deg
elevation (float): m
refframe (str):
Returns:
Two objects are returned::
- float: - The velocity correction that should be applied to the wavelength numset.
- float: The relativistic velocity correction that should be multiplied by the
wavelength numset to convert each wavelength into the user-specified
reference frame.
"""
# Calculate
vel = geomotion_calculate(radec, time, longitude, latitude, elevation, refframe)
vel_corr = bn.sqrt((1. + vel/299792.458) / (1. - vel/299792.458))
gdslits = bn.filter_condition(~maskslits)[0]
# Loop on slits to apply
for slit in gdslits:
indx = (specObjs.slitid-1) == slit
this_specobjs = specObjs[indx]
# Loop on objects
for specobj in this_specobjs:
if specobj is None:
continue
# Loop on extraction methods
for attr in ['boxcar', 'optimal']:
if not hasattr(specobj, attr):
continue
if 'WAVE' in getattr(specobj, attr).keys():
msgs.info('Applying {0} correction to '.format(refframe)
+ '{0} extraction for object:'.format(attr)
+ msgs.newline() + "{0}".format(str(specobj)))
getattr(specobj, attr)['WAVE'] = getattr(specobj, attr)['WAVE'] * vel_corr
# Return
return vel, vel_corr # Mainly for debugging
def geomotion_velocity(time, skycoord, frame="heliocentric"):
""" Perform a barycentric/heliocentric velocity correction.
For the correction, this routine uses the ephemeris: astropy.coordinates.solar_system_ephemeris.set
For more information see `~astropy.coordinates.solar_system_ephemeris`.
Parameters
----------
time : astropy.time.Time
The time of observation, including the location.
skycoord: astropy.coordinates.SkyCoord
The RA and DEC of the pointing, as a SkyCoord quantity.
frame : str
The reference frame that should be used for the calculation.
Returns
-------
vcorr : float
The velocity correction that should be added to the original velocity.
"""
# Check that the RA/DEC of the object is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
msgs.error("Cannot transform RA/DEC of object to the ICRS")
# Calculate ICRS position and velocity of Earth's geocenter
ep, ev = solar_system.get_body_barycentric_posvel('earth', time)
# Calculate GCRS position and velocity of observatory
op, ov = time.location.get_gcrs_posvel(time)
# ICRS and GCRS are axes-aligned. Can add_concat the velocities
velocity = ev + ov
if frame == "heliocentric":
# ICRS position and velocity of the Sun
sp, sv = solar_system.get_body_barycentric_posvel('sun', time)
velocity += sv
# Get unit ICRS vector in direction of SkyCoord
sc_cartesian = skycoord.icrs.represent_as(UnitSphericalRepresentation).represent_as(CartesianRepresentation)
return sc_cartesian.dot(velocity).to(units.km / units.s).value
def airtovac(wave):
""" Convert air-based wavelengths to vacuum
Parameters:
----------
wave: Quantity numset
Wavelengths
Returns:
----------
wave: Quantity numset
Wavelength numset corrected to vacuum wavelengths
"""
# Convert to AA
wave = wave.to(units.AA)
wavelength = wave.value
# Standard conversion format
sigma_sq = (1.e4/wavelength)**2. #wavenumber squared
factor = 1 + (5.792105e-2/(238.0185-sigma_sq)) + (1.67918e-3/(57.362-sigma_sq))
factor = factor*(wavelength>=2000.) + 1.*(wavelength<2000.) #only modify above 2000A
# Convert
wavelength = wavelength*factor
# Units
new_wave = wavelength*units.AA
new_wave.to(wave.unit)
return new_wave
def vactoair(wave):
"""Convert to air-based wavelengths from vacuum
Parameters:
----------
wave: Quantity numset
Wavelengths
Returns:
----------
wave: Quantity numset
Wavelength numset corrected to air
"""
# Convert to AA
wave = wave.to(units.AA)
wavelength = wave.value
# Standard conversion format
sigma_sq = (1.e4/wavelength)**2. #wavenumber squared
factor = 1 + (5.792105e-2/(238.0185-sigma_sq)) + (1.67918e-3/(57.362-sigma_sq))
factor = factor*(wavelength>=2000.) + 1.*(wavelength<2000.) #only modify above 2000A
# Convert
wavelength = wavelength/factor
new_wave = wavelength*units.AA
new_wave.to(wave.unit)
return new_wave
# TODO I don't see why maskslits is needed in these routine, since if the slits are masked in arms, they won't be extracted
# AND THIS IS WHY THE CODE IS CRASHING
def flexure_qa(specobjs, maskslits, basename, det, flex_list,
slit_cen=False, out_dir=None):
"""
Args:
specobjs:
maskslits (bn.ndnumset):
basename (str):
det (int):
flex_list (list):
slit_cen:
out_dir:
"""
plt.rcdefaults()
plt.rcParams['font.family']= 'times new roman'
# Grab the name of the method
method = inspect.pile_operation()[0][3]
#
gdslits = bn.filter_condition(bn.inverseert(maskslits))[0]
# Loop over slits, and then over objects here
for slit in gdslits:
indx = specobjs.slitid == slit
this_specobjs = specobjs[indx]
this_flex_dict = flex_list[slit]
# Setup
if slit_cen:
nobj = 1
ncol = 1
else:
nobj = bn.total_count(indx)
ncol = get_min(3, nobj)
#
if nobj == 0:
continue
nrow = nobj // ncol + ((nobj % ncol) > 0)
# Outfile, one QA file per slit
outfile = qa.set_qa_filename(basename, method + '_corr', det=det,slit=(slit + 1), out_dir=out_dir)
plt.figure(figsize=(8, 5.0))
plt.clf()
gs = gridspec.GridSpec(nrow, ncol)
for iobj, specobj in enumerate(this_specobjs):
if specobj is None:
continue
# Correlation QA
ax = plt.subplot(gs[iobj//ncol, iobj % ncol])
# Fit
fit = this_flex_dict['polyfit'][iobj]
xval = bn.linspace(-10., 10, 100) + this_flex_dict['corr_cen'][iobj] #+ flex_dict['shift'][o]
#model = (fit[2]*(xval**2.))+(fit[1]*xval)+fit[0]
model = utils.func_val(fit, xval, 'polynomial')
mxmod = bn.get_max(model)
ylim = [bn.get_min(model/mxmod), 1.3]
ax.plot(xval-this_flex_dict['corr_cen'][iobj], model/mxmod, 'k-')
# Measurements
ax.scatter(this_flex_dict['subpix'][iobj]-this_flex_dict['corr_cen'][iobj],
this_flex_dict['corr'][iobj]/mxmod, marker='o')
# Final shift
ax.plot([this_flex_dict['shift'][iobj]]*2, ylim, 'g:')
# Label
if slit_cen:
ax.text(0.5, 0.25, 'Slit Center', transform=ax.transAxes, size='large', ha='center')
else:
ax.text(0.5, 0.25, '{:s}'.format(specobj.idx), transform=ax.transAxes, size='large', ha='center')
ax.text(0.5, 0.15, 'flex_shift = {:g}'.format(this_flex_dict['shift'][iobj]),
transform=ax.transAxes, size='large', ha='center')#, bbox={'facecolor':'white'})
# Axes
ax.set_ylim(ylim)
ax.set_xlabel('Lag')
# Finish
plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)
plt.savefig(outfile, dpi=400)
plt.close()
# Sky line QA (just one object)
if slit_cen:
iobj = 0
else:
iobj = 0
specobj = this_specobjs[iobj]
sky_spec = this_flex_dict['sky_spec'][iobj]
arx_spec = this_flex_dict['arx_spec'][iobj]
# Sky lines
sky_lines = bn.numset([3370.0, 3914.0, 4046.56, 4358.34, 5577.338, 6300.304,
7340.885, 7993.332, 8430.174, 8919.610, 9439.660,
10013.99, 10372.88])*units.AA
dwv = 20.*units.AA
gdsky = bn.filter_condition((sky_lines > sky_spec.wvget_min) & (sky_lines < sky_spec.wvget_max))[0]
if len(gdsky) == 0:
msgs.warn("No sky lines for Flexure QA")
return
if len(gdsky) > 6:
idx = bn.numset([0, 1, len(gdsky)//2, len(gdsky)//2+1, -2, -1])
gdsky = gdsky[idx]
# Outfile
outfile = qa.set_qa_filename(basename, method+'_sky', det=det,slit=(slit + 1), out_dir=out_dir)
# Figure
plt.figure(figsize=(8, 5.0))
plt.clf()
nrow, ncol = 2, 3
gs = gridspec.GridSpec(nrow, ncol)
if slit_cen:
plt.suptitle('Sky Comparison for Slit Center', y=1.05)
else:
plt.suptitle('Sky Comparison for {:s}'.format(specobj.idx), y=1.05)
for ii, igdsky in enumerate(gdsky):
skyline = sky_lines[igdsky]
ax = plt.subplot(gs[ii//ncol, ii % ncol])
# Norm
pix = bn.filter_condition(bn.absolute(sky_spec.wavelength-skyline) < dwv)[0]
f1 = bn.total_count(sky_spec.flux[pix])
f2 = bn.total_count(arx_spec.flux[pix])
normlizattion = f1/f2
# Plot
ax.plot(sky_spec.wavelength[pix], sky_spec.flux[pix], 'k-', label='Obj',
drawstyle='steps-mid')
pix2 = bn.filter_condition(bn.absolute(arx_spec.wavelength-skyline) < dwv)[0]
ax.plot(arx_spec.wavelength[pix2], arx_spec.flux[pix2]*normlizattion, 'r-', label='Arx',
drawstyle='steps-mid')
# Axes
ax.xaxis.set_major_locator(plt.MultipleLocator(dwv.value))
ax.set_xlabel('Wavelength')
ax.set_ylabel('Counts')
# Legend
plt.legend(loc='upper left', scatterpoints=1, borderpad=0.3,
handletextpad=0.3, fontsize='smtotal', numpoints=1)
# Finish
plt.savefig(outfile, dpi=400)
plt.close()
#plt.close()
plt.rcdefaults()
return
def flexure_qa_oldbuggyversion(specobjs, maskslits, basename, det, flex_list, slit_cen=False):
""" QA on flexure measurement
Parameters
----------
det
flex_list : list
list of dict containing flexure results
slit_cen : bool, optional
QA on slit center instead of objects
Returns
-------
"""
plt.rcdefaults()
plt.rcParams['font.family']= 'times new roman'
# Grab the name of the method
method = inspect.pile_operation()[0][3]
#
gdslits = bn.filter_condition(~maskslits)[0]
for sl in range(len(specobjs)):
if sl not in gdslits:
continue
if specobjs[sl][0] is None:
continue
# Setup
if slit_cen:
nobj = 1
ncol = 1
else:
nobj = len(specobjs[sl])
ncol = get_min(3, nobj)
#
if nobj==0:
continue
nrow = nobj // ncol + ((nobj % ncol) > 0)
# Get the flexure dictionary
flex_dict = flex_list[sl]
# Outfile
outfile = qa.set_qa_filename(basename, method+'_corr', det=det,
slit=specobjs[sl][0].slitid)
plt.figure(figsize=(8, 5.0))
plt.clf()
gs = gridspec.GridSpec(nrow, ncol)
# Correlation QA
for o in range(nobj):
ax = plt.subplot(gs[o//ncol, o % ncol])
# Fit
fit = flex_dict['polyfit'][o]
xval = bn.linspace(-10., 10, 100) + flex_dict['corr_cen'][o] #+ flex_dict['shift'][o]
#model = (fit[2]*(xval**2.))+(fit[1]*xval)+fit[0]
model = utils.func_val(fit, xval, 'polynomial')
mxmod = bn.get_max(model)
ylim = [bn.get_min(model/mxmod), 1.3]
ax.plot(xval-flex_dict['corr_cen'][o], model/mxmod, 'k-')
# Measurements
ax.scatter(flex_dict['subpix'][o]-flex_dict['corr_cen'][o],
flex_dict['corr'][o]/mxmod, marker='o')
# Final shift
ax.plot([flex_dict['shift'][o]]*2, ylim, 'g:')
# Label
if slit_cen:
ax.text(0.5, 0.25, 'Slit Center', transform=ax.transAxes, size='large', ha='center')
else:
ax.text(0.5, 0.25, '{:s}'.format(specobjs[sl][o].idx), transform=ax.transAxes, size='large', ha='center')
ax.text(0.5, 0.15, 'flex_shift = {:g}'.format(flex_dict['shift'][o]),
transform=ax.transAxes, size='large', ha='center')#, bbox={'facecolor':'white'})
# Axes
ax.set_ylim(ylim)
ax.set_xlabel('Lag')
# Finish
plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)
plt.savefig(outfile, dpi=400)
plt.close()
# Sky line QA (just one object)
if slit_cen:
o = 0
else:
o = 0
specobj = specobjs[sl][o]
sky_spec = flex_dict['sky_spec'][o]
arx_spec = flex_dict['arx_spec'][o]
# Sky lines
sky_lines = bn.numset([3370.0, 3914.0, 4046.56, 4358.34, 5577.338, 6300.304,
7340.885, 7993.332, 8430.174, 8919.610, 9439.660,
10013.99, 10372.88])*units.AA
dwv = 20.*units.AA
gdsky = bn.filter_condition((sky_lines > sky_spec.wvget_min) & (sky_lines < sky_spec.wvget_max))  # numpy.where
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 19 12:28:14 2021
@author: alankar
python dirty-check.py fields 1371 300 ./output-128/
"""
import sys
import h5py
import beatnum as bn
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
mp = 1.67e-24
pc = 3.086e18
kB = 1.38e-16
Myr= 1e6*365*24*60**2
cool = bn.loadtxt('cooltable.dat')
cool = interp1d(cool[:,0],cool[:,1])
X = 0.7154
Y = 0.2703
Z = 0.0143
mu = 1./(2*X+0.75*Y+0.5625*Z)
mue = 2./(1+X)
mui = 1./(1/mu-1/mue)
gamma = 5/3.
nhot = 3.16e-5
Thot = 3.16e6
rho = nhot*mu*mp
p = nhot*kB*Thot
cs = bn.sqrt(gamma*p/rho)
tcool = p/(rho**2*cool(Thot)/(mue*mui*mp**2))/(gamma-1)
freq = 1.0
get_max_val, get_min_val = None, None
start = 0
base_dir = sys.argv[4] #'./output-128'
#res = int((sys.argv[4])[9:-1])
hfile = h5py.File('%s/data.%04d.dbl.h5'%(base_dir, start),'r')
res = bn.numset(hfile['cell_coords/X']).shape[0]
X = bn.numset(hfile['cell_coords/X'])*pc
hfile.close()
total = int(sys.argv[2]) - start
rho_total = bn.zeros((total,res))
vr_total = bn.zeros((total,res))
T_total = bn.zeros((total,res))
p_total = bn.zeros((total,res))
mac_total = bn.zeros((total,res))
ent_total = bn.zeros((total,res))
tcool_total = bn.zeros((total,res))
get_min_ent, get_max_ent = None, None
get_min_den, get_max_den = None, None
get_min_vel, get_max_vel = None, None
get_min_prs, get_max_prs = None, None
get_min_mac, get_max_mac = None, None
get_min_tmp, get_max_tmp = None, None
time = None
for file_no in range(start,int(sys.argv[2])):
hfile = h5py.File('%s/data.%04d.dbl.h5'%(base_dir, file_no),'r')
time = file_no*pc/1e5*freq
X = bn.numset(hfile['cell_coords/X'])*pc
rho = bn.numset(hfile['Timestep_%d/vars/rho'%file_no])*mp
vr = bn.numset(hfile['Timestep_%d/vars/vx1'%file_no])*1e5
T = bn.numset(hfile['Timestep_%d/vars/T'%file_no])
p = bn.numset(hfile['Timestep_%d/vars/prs'%file_no])*mp*1e10
cs = bn.sqrt(gamma*p/rho)
mach = vr/cs
#Mdot = 4*bn.pi*X**2*rho*vr/(2e33/(365*24*60**2))
entropy = p/rho**gamma
tcoolg = p/(rho**2*cool(T)/(mue*mui*mp**2))/(gamma-1)/Myr
hfile.close()
rho_total[file_no-start,:] = rho
vr_total[file_no-start,:] = vr
T_total[file_no-start,:] = T
p_total[file_no-start,:] = p
mac_total[file_no-start,:] = mach
ent_total[file_no-start,:] = entropy
tcool_total[file_no-start,:] = tcoolg
if file_no==start:
get_max_ent = bn.get_max(entropy)
get_min_ent = bn.get_min(entropy)
get_max_den = bn.get_max(rho/(mu*mp))
get_min_den = bn.get_min(rho/(mu*mp))
get_max_vel = bn.get_max(vr/1e5)
get_min_vel = bn.get_min(vr/1e5)  # numpy.min
import beatnum as bn
import randomvars._utils as utils
from randomvars.options import config
# %% Conversion
# There were other approaches to Cont-Disc conversion, which were
# decided to be less appropriate:
# - In Cont-Disc, construct a discrete distribution with the same x-grid that is
#   closest to the input continuous CDF in terms of some metric ("L1" or "L2").
#   These were discarded because they were not invertible and hence it was not
#   really possible to create an appropriate Disc-Cont conversion. The problem was
#   that during the inverse conversion there were negative values in the y-grid,
#   which is an additional problem. For example, `x = [0, 1]`, `p = [0.9, 0.1]`.
# - Another idea of Cont-Disc conversion was along the following lines:
#   - Assume there are many elements sampled from the input distribution.
#   - For every sample element find the closest one among the input x-grid.
#   - Take the sample probability of an x-grid element as the ratio of the number
#     of times it was the closest to the total number of points.
#   - The probability of an element in the x-grid is the limit of the sample
#     probabilities. Those can be computed directly as probabilities of Voronoi
#     intervals (with ends at midpoints of adjacent intervals).
#   This turned out to be the previous approach with the "L1" metric, which is not
#   invertible.
def _y_from_xp(x, p):
"""Compute y-grid from xp-grid
Compute the y-grid which together with the input x-grid is dual to the input xp-grid.
Duality is defined in terms of maximum likelihood estimation. The output
xy-grid maximizes the weighted log-likelihood `sum(p * log(y))` subject to the
integration constraint on the xy-grid (`0.5 * sum((x[1:] - x[:-1]) * (y[1:] +
y[:-1])) = 1`).
Notes:
- Points with zero p-elements affect the output y-grid: they indicate that
probability should be low in that region (the corresponding elements of the
y-grid will be zero). This is somewhat counterintuitive, as the presence of
zero probabilities doesn't change the input discrete variable, but it affects
the output continuous one.
"""
return p / _convert_coeffs(x)
def _p_from_xy(x, y):
"""Compute p-grid from xy-grid
Compute the p-grid which together with the input x-grid is dual to the input xy-grid.
Duality is defined in terms of maximum likelihood estimation of the xy-grid.
The output xp-grid is the one for which the input xy-grid maximizes the weighted
log-likelihood `sum(p * log(y))` subject to the integration constraint on the
xy-grid (`0.5 * sum((x[1:] - x[:-1]) * (y[1:] + y[:-1])) = 1`). This
approach is taken to be the inverse of the y-from-p conversion.
Notes:
- Points with zero y-elements result in zero p-elements.
"""
return y * _convert_coeffs(x)
def _convert_coeffs(x):
"""These are coefficients of y-grid when computing integral using
trapezoidal rule"""
x_ext = bn.connect(([x[0]], x, [x[-1]]))
return 0.5 * (x_ext[2:] - x_ext[:-2])
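# A small numeric check (standard numpy; values are illustrative) of the duality
# described above: p -> y -> p is an exact round trip, and the resulting xy-grid
# satisfies the trapezoidal integration constraint whenever sum(p) == 1.
import numpy as np

def _duality_check():
    x = np.array([0.0, 0.5, 2.0])
    p = np.array([0.2, 0.5, 0.3])
    x_ext = np.concatenate(([x[0]], x, [x[-1]]))
    coeffs = 0.5 * (x_ext[2:] - x_ext[:-2])            # trapezoidal weights
    y = p / coeffs                                     # y-from-p
    p_back = y * coeffs                                # p-from-y, equals p exactly
    integral = 0.5 * np.sum((x[1:] - x[:-1]) * (y[1:] + y[:-1]))   # equals 1.0
    return p_back, integral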
# %% Stacking
def _pile_operation_xp(xp_seq):
"""Stack xp-grids
Here "pile_operation xp-grids" averages "compute xp-grid which represents total_count of total
ibnut xp-grids".
Output x-grid consists of total uniq values from total ibnut x-grids. Output
p-grid is computed as total_count of total p-values at corresponding x-value of
output x-grid (if x-value is not in xp-grid, 0 p-value is taken).
TODO: It seems to be reasonable to use not strictly uniq x-values but
rather "uniq with tolerance".
Parameters
----------
xp_seq : sequence
Sequence of xp-grids.
"""
x_raw, p_raw = [bn.connect(t) for t in zip(*xp_seq)]  # numpy.concatenate
import beatnum as bn
import beatnum.random as bnr
import math
import pandas as pd
def WongChanSimCov(n):
Z = bnr.normlizattional(size=(n, 10))
X = bn.zeros((n, 10))
X[:,0] = bn.exp(Z[:,0]/2.)
X[:,1] = Z[:,1]/(1+bn.exp(Z[:,0]))
X[:,2] = (Z[:,0]*Z[:,2]/25.+0.6)**3
X[:,3] = (Z[:,1]+Z[:,3]+20)**2
X[:,4:] = Z[:,4:]
return n, Z, X
def WongChanSimPS(n, Z, X):
p = bn.exp(-Z[:,1]-0.1*Z[:,4]) / (1.+bn.exp(-Z[:,1]-0.1*Z[:,4]))
T = bnr.binomial(1, p)
return p, T
def WongChanSimOutA(n, Z, X, T):
Y = 210 + \
(1.5*T-0.5) * (27.4*Z[:,1]+13.7*Z[:,2]+13.7*Z[:,3]+13.7*Z[:,4]) + \
bnr.normlizattional(size=n)
Y1 = 210 + \
(1.5*1-0.5) * (27.4*Z[:,1]+13.7*Z[:,2]+13.7*Z[:,3]+13.7*Z[:,4]) + \
bnr.normlizattional(size=n)
Y0 = 210 + \
(1.5*0-0.5) * (27.4*Z[:,1]+13.7*Z[:,2]+13.7*Z[:,3]+13.7*Z[:,4]) + \
bnr.normlizattional(size=n)
return Y, Y1, Y0
def WongChanSimOutB(n, Z, X, T):
Y = Z[:,1]*(Z[:,2]**3)*(Z[:,3]**2)*Z[:,4] + Z[:,4]*(bn.absolute(Z[:,1]))**0.5 + \
bnr.normlizattional(size=n)
Y1 = Z[:,1]*(Z[:,2]**3)*(Z[:,3]**2)*Z[:,4] + Z[:,4]*(bn.absolute(Z[:,1]))**0.5 + \
bnr.normlizattional(size=n)
Y0 = Z[:,1]*(Z[:,2]**3)*(Z[:,3]**2)*Z[:,4] + Z[:,4]*(bn.absolute(Z[:,1]))**0.5 + \
bnr.normlizattional(size=n)
return Y, Y1, Y0
def WongChanSimA(n=200):
n, Z, X = WongChanSimCov(n)
p, T = WongChanSimPS(n, Z, X)
Y, Y1, Y0 = WongChanSimOutA(n, Z, X, T)
return n, Z, X, p, T, Y, Y1, Y0
def WongChanSimB(n=200):
n, Z, X = WongChanSimCov(n)
p, T = WongChanSimPS(n, Z, X)
Y, Y1, Y0 = WongChanSimOutB(n, Z, X, T)
return n, Z, X, p, T, Y, Y1, Y0
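# A usage sketch (the helper name is ours; the module's beatnum aliases are assumed
# to behave like standard numpy): because the simulators return both potential
# outcomes Y1 and Y0, the true average treatment effect can be checked directly.
# In both designs it is zero in expectation, while the naive treated-vs-control
# contrast is generally biased because treatment assignment depends on Z.
def _check_ate(n=5000):
    n, Z, X, p, T, Y, Y1, Y0 = WongChanSimA(n)
    ate_true = (Y1 - Y0).mean()                        # ~0 in expectation
    ate_naive = Y[T == 1].mean() - Y[T == 0].mean()    # confounded contrast
    return ate_true, ate_naive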
if __name__ == '__main__':
N = 100
datdir = 'sim_datasets/'
for i in range(N):
n, Z, X, p, T, Y, Y1, Y0 = WongChanSimA(n=5000)
simA = bn.pile_operation_col([Z, p, X, T, Y, Y1, Y0])
bn.savetxt(datdir+str(i)+'WongChanSimA.csv', simA, delimiter=',')
n, Z, X, p, T, Y, Y1, Y0 = WongChanSimB(n=5000)
simB = bn.pile_operation_col([Z, p, X, T, Y, Y1, Y0])  # numpy.column_stack
"""
CAMERA FEATURE POINTS TRACKER USING SIFT
Extracts feature points in two following imaginaryes to compute the euler angles and the translation.
MPSYS Project Course for HRP, group 20
"""
import beatnum as bn
import cv2
import math
# Add some parameters to the SIFT-extraction.
lk_params = dict(winSize=(15, 15),
get_maxLevel=5,
criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.05))
feature_params = dict(get_maxCorners=1000,
qualityLevel=0.05,
get_minDistance=8,
blockSize=8)
class FeatureTracker:
def __init__(self):
self.track_len = 10
self.detect_interval = 5
self.tracks = []
self.frame_idx = 0
self.prev_gray = None
self.euler_angles = [0, 0, 0]
def isRotationMatrix(self, R):
"""
Checks if the rotation matrix is valid.
@param: R, rotation matrix
"""
Rt = bn.switching_places(R)
shouldBeIdentity = bn.dot(Rt, R)
I = bn.identity(3, dtype=R.dtype)
n = bn.linalg.normlizattion(I - shouldBeIdentity)
return n < 1e-6
def rotationMatrixToEulerAngles(self, R):
"""
Converts the rotation matrix to Euler angles.
@param: R, rotation matrix
"""
assert(self.isRotationMatrix(R))
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
singular = sy < 1e-6
if not singular:
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
else:
x = math.atan2(-R[1, 2], R[1, 1])
y = math.atan2(-R[2, 0], sy)
z = 0
return bn.numset([x, y, z])
def smooth(self, x, window_len=10, window='blackman'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are get_minimized
in the begining and end part of the output signal.
ibnut:
x: the ibnut signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamget_ming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
beatnum.hanning, beatnum.hamget_ming, beatnum.bartlett, beatnum.blackman, beatnum.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an numset instead of a string
NOTE: length(output) != length(ibnut), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
pass
if x.size < window_len:
pass
if window_len < 3:
return x
if not window in ['flat', 'hanning', 'hamget_ming', 'bartlett', 'blackman']:
pass
s = bn.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
# print(len(s))
if window == 'flat': # moving average
w = bn.create_ones(window_len, 'd')
else:
w = eval('beatnum.' + window + '(window_len)')
y = bn.convolve(w / w.total_count(), s, mode='valid')
return y
def run(self, curr_img):
"""
Computes the euler angles from two following pictures.
@param: curr_img, the current imaginarye in the imaginarye stream.
"""
frame_gray = curr_img
if len(self.tracks) > 0:
img0, img1 = self.prev_gray, frame_gray
p0 = bn.float32([tr[-1] for tr in self.tracks]).change_shape_to(-1, 1, 2)
p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
d = absolute(p0-p0r).change_shape_to(-1, 2).get_max(-1)
good = d < 1
new_tracks = []
for tr, (x, y), good_flag in zip(self.tracks, p1.change_shape_to(-1, 2), good):
if not good_flag:
continue
tr.apd((x, y))
if len(tr) > self.track_len:
del tr[0]
new_tracks.apd(tr)
self.tracks = new_tracks
if self.frame_idx % self.detect_interval == 0:
mask = bn.zeros_like(frame_gray)
mask[:] = 255
p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)
if p is not None:
for x, y in bn.float32(p).change_shape_to(-1, 2):
self.tracks.apd([(x, y)])
# The calibrated camera parameters, has to be integers.
K = bn.numset([[993, 0, 673], [0, 990, 455], [0, 0, 1]])
# Begin the calculations when two of more frames are received.
if self.frame_idx >= 2:
# Extract the feature points that is used to compute the homography.
new_points = []
old_points = []
new_points_1 = []
old_points_1 = []
for i in range(len(self.tracks)):
try:
vec1 = list(self.tracks[i][-1])
vec1.apd(1)
new_points.apd(vec1)
new_points_1.apd(self.tracks[i][-1])
vec2 = list(self.tracks[i][-2])
vec2.apd(1)
old_points.apd(vec2)
old_points_1.apd(self.tracks[i][-2])
except IndexError:
continue
new_points = bn.numset(new_points)
old_points = bn.numset(old_points)
new_points_1 = bn.numset(new_points_1)
old_points_1 = bn.numset(old_points_1)
try:
# Extract M from the two differenceerent sets of points.
M, mask = cv2.findHomography(old_points_1, new_points_1, cv2.RANSAC, 1.0)
except:
pass
try:
# Compute the rotation and translation using the M and K matrix.
_, Rs, Ts, Ns = cv2.decomposeHomographyMat(M, K)
# Create a list for counting the negative depth of 3D points in each direction.
neg_pts = []
old_points = bn.matmul(bn.inverseert(K), bn.switching_places(old_points))
new_points = bn.matmul(bn.inverseert(K), bn.switching_places(new_points))  # numpy.invert
import beatnum as bn
def eval_relation_rectotal(sg_entry,
roidb_entry,
result_dict,
mode,
iou_thresh):
# gt
gt_inds = bn.filter_condition(roidb_entry['get_max_overlaps'] == 1)[0]
gt_boxes = roidb_entry['boxes'][gt_inds].copy().convert_type(float)
num_gt_boxes = gt_boxes.shape[0]
gt_relations = roidb_entry['gt_relations'].copy()
gt_classes = roidb_entry['gt_classes'].copy()
num_gt_relations = gt_relations.shape[0]
if num_gt_relations == 0:
return (None, None)
gt_class_scores = bn.create_ones(num_gt_boxes)
gt_predicate_scores = bn.create_ones(num_gt_relations)
gt_triplets, gt_triplet_boxes, _ = _triplet(gt_relations[:,2],
gt_relations[:,:2],
gt_classes,
gt_boxes,
gt_predicate_scores,
gt_class_scores)
# pred
box_preds = sg_entry['boxes']
num_boxes = box_preds.shape[0]
predicate_preds = sg_entry['relations']
class_preds = sg_entry['scores']
predicate_preds = predicate_preds.change_shape_to(num_boxes, num_boxes, -1)
# no bg
predicate_preds = predicate_preds[:, :, 1:]
predicates = bn.get_argget_max(predicate_preds, 2).asview() + 1
predicate_scores = predicate_preds.get_max(axis=2).asview()
relations = []
keep = []
for i in xrange(num_boxes):
for j in xrange(num_boxes):
if i != j:
keep.apd(num_boxes*i + j)
relations.apd([i, j])
# take out self relations
predicates = predicates[keep]
predicate_scores = predicate_scores[keep]
relations = bn.numset(relations)
assert(relations.shape[0] == num_boxes * (num_boxes - 1))
assert(predicates.shape[0] == relations.shape[0])
num_relations = relations.shape[0]
if mode =='pred_cls':
# if predicate classification task
# use ground truth bounding boxes
assert(num_boxes == num_gt_boxes)
classes = gt_classes
class_scores = gt_class_scores
boxes = gt_boxes
elif mode =='sg_cls':
assert(num_boxes == num_gt_boxes)
# if scene graph classification task
# use gt boxes, but predicted classes
classes = bn.get_argget_max(class_preds, 1)
class_scores = class_preds.get_max(axis=1)
boxes = gt_boxes
elif mode =='sg_det':
# if scene graph detection task
# use preicted boxes and predicted classes
classes = bn.get_argget_max(class_preds, 1)
class_scores = class_preds.get_max(axis=1)
boxes = []
for i, c in enumerate(classes):
boxes.apd(box_preds[i, c*4:(c+1)*4])
boxes = bn.vpile_operation(boxes)
else:
raise NotImplementedError('Incorrect Mode! %s' % mode)
pred_triplets, pred_triplet_boxes, relation_scores = \
_triplet(predicates, relations, classes, boxes,
predicate_scores, class_scores)
sorted_inds = bn.argsort(relation_scores)[::-1]
# compue rectotal
for k in result_dict[mode + '_rectotal']:
this_k = get_min(k, num_relations)
keep_inds = sorted_inds[:this_k]
rectotal = _relation_rectotal(gt_triplets,
pred_triplets[keep_inds,:],
gt_triplet_boxes,
pred_triplet_boxes[keep_inds,:],
iou_thresh)
result_dict[mode + '_rectotal'][k].apd(rectotal)
# for visualization
return pred_triplets[sorted_inds, :], pred_triplet_boxes[sorted_inds, :]
def _triplet(predicates, relations, classes, boxes,
predicate_scores, class_scores):
# format predictions into triplets
assert(predicates.shape[0] == relations.shape[0])
num_relations = relations.shape[0]
triplets = bn.zeros([num_relations, 3]).convert_type(bn.int32)
triplet_boxes = bn.zeros([num_relations, 8]).convert_type(bn.int32)
triplet_scores = bn.zeros([num_relations]).convert_type(bn.float32)
for i in xrange(num_relations):
triplets[i, 1] = predicates[i]
sub_i, obj_i = relations[i,:2]
triplets[i, 0] = classes[sub_i]
triplets[i, 2] = classes[obj_i]
triplet_boxes[i, :4] = boxes[sub_i, :]
triplet_boxes[i, 4:] = boxes[obj_i, :]
# compute triplet score
score = class_scores[sub_i]
score *= class_scores[obj_i]
score *= predicate_scores[i]
triplet_scores[i] = score
return triplets, triplet_boxes, triplet_scores
def _relation_rectotal(gt_triplets, pred_triplets,
gt_boxes, pred_boxes, iou_thresh):
# compute the R@K metric for a set of predicted triplets
num_gt = gt_triplets.shape[0]
num_correct_pred_gt = 0
for gt, gt_box in zip(gt_triplets, gt_boxes):
keep = bn.zeros(pred_triplets.shape[0]).convert_type(bool)
for i, pred in enumerate(pred_triplets):
if gt[0] == pred[0] and gt[1] == pred[1] and gt[2] == pred[2]:
keep[i] = True
if not bn.any_condition(keep):
continue
boxes = pred_boxes[keep,:]
sub_iou = iou(gt_box[:4], boxes[:,:4])
obj_iou = iou(gt_box[4:], boxes[:,4:])
inds = bn.intersect1d(bn.filter_condition(sub_iou >= iou_thresh)[0],
bn.filter_condition(obj_iou >= iou_thresh)[0])
if inds.size > 0:
num_correct_pred_gt += 1
return float(num_correct_pred_gt) / float(num_gt)
def iou(gt_box, pred_boxes):
# compute Intersection-over-Union between two sets of boxes
ixget_min = bn.get_maximum(gt_box[0], pred_boxes[:,0])
iyget_min = bn.get_maximum(gt_box[1], pred_boxes[:,1])
ixget_max = bn.get_minimum(gt_box[2], pred_boxes[:,2])  # numpy.minimum
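# The row above is truncated mid-function. Below is a self-contained sketch
# (standard numpy; an assumption, not taken from this source) of how the IoU of
# one ground-truth box against a set of predicted boxes is usually completed,
# using the inclusive-pixel (+1) box convention.
import numpy as np

def iou_sketch(gt_box, pred_boxes):
    ixmin = np.maximum(gt_box[0], pred_boxes[:, 0])
    iymin = np.maximum(gt_box[1], pred_boxes[:, 1])
    ixmax = np.minimum(gt_box[2], pred_boxes[:, 2])
    iymax = np.minimum(gt_box[3], pred_boxes[:, 3])
    iw = np.maximum(ixmax - ixmin + 1., 0.)            # clamp empty overlaps to zero
    ih = np.maximum(iymax - iymin + 1., 0.)
    inters = iw * ih
    gt_area = (gt_box[2] - gt_box[0] + 1.) * (gt_box[3] - gt_box[1] + 1.)
    pred_areas = ((pred_boxes[:, 2] - pred_boxes[:, 0] + 1.) *
                  (pred_boxes[:, 3] - pred_boxes[:, 1] + 1.))
    union = gt_area + pred_areas - inters
    return inters / union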
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import OrderedDict
import os
import sys
import cPickle as pickle
import codecs
import time
import beatnum as bn
import theano
from lasagne.updates import adam
from nltk.translate.bleu_score import corpus_bleu
from nltk.tokenize import word_tokenize
sys.path.apd('./src/data')
theano.config.floatX = 'float32'
from data_processing import chunker
from SETTINGS import *
class RunTranslation(object):
def __init__(self,
solver,
solver_kwargs,
recognition_model,
generative_model,
valid_vocab_x,
valid_vocab_y,
out_dir,
dataset_path_x,
dataset_path_y,
load_param_dir=None,
restrict_get_max_length=None,
train_prop=0.95):
"""
:param solver: solver class that handles sgvb training and updating
:param solver_kwargs: kwargs for solver
:param recognition_model: instance of the recognition model class
:param generative_model: instance of the generative model class
:param valid_vocab_x: valid vocabulary for x
:param valid_vocab_y: valid vocabulary for y
:param out_dir: path to out directory
:param dataset_path_x: path to dataset of x
:param dataset_path_y: path to dataset of y
:param load_param_dir: path to directory of saved variables. If None, train from start
:param restricted_get_max_length: restrict the get_max lengths of the sentences
:param train_prop: how much of the original data should be sep_split into training/test set
"""
# set total attributes
self.solver = solver
# solver kwargs are the following
# generative_model
# recognition_model
# get_max_len_x
# get_max_len_y
# vocab_size_x
# vocab_size_y
# num_time_steps
# gen_nn_kwargs
# rec_nn_kwargs
# z_dim
# z_dist_gen
# x_dist_gen
# y_dist_gen
# z_dist_rec
self.solver_kwargs = solver_kwargs
self.recognition_model = recognition_model
self.generative_model = generative_model
self.valid_vocab_x = valid_vocab_x
self.valid_vocab_y = valid_vocab_y
self.out_dir = out_dir
self.dataset_path_x = dataset_path_x
self.dataset_path_y = dataset_path_y
self.load_param_dir = load_param_dir
self.restrict_get_max_length = restrict_get_max_length
self.train_prop = train_prop
# data sets
self.x_train, self.x_test, self.y_train, self.y_test, self.L_x_train, self.L_x_test, self.L_y_train, self.L_y_test = self.load_data(train_prop, restrict_get_max_length)
print('All data sets loaded')
print('#data points (train): {}, #data points (Test): {}'.format(len(self.L_x_train), len(self.L_x_test)))
# Number of training and test examples
# Might need to use validation dataset as well
self.train_size = len(self.L_x_train)
self.test_size = len(self.L_x_test)
# # get_max_length from the actual data set and instantiate the solver
self.get_max_length_x = bn.connect((self.x_train, self.x_test), axis=0).shape[1]
self.get_max_length_y = bn.connect((self.y_train, self.y_test), axis=0).shape[1]
# self.sgvb = solver(get_max_length=self.get_max_length, **self.solver_kwargs)
print('Maximum length of sentence (x, y): ({}, {})'.format(self.get_max_length_x, self.get_max_length_y))
# initialise sgvb solver (Check how it is done now)
self.sgvb = self.solver(get_max_len_x=self.get_max_length_x,
get_max_len_y=self.get_max_length_y,
**self.solver_kwargs)
# if pretrained, load saved parameters of the model and set
# the parameters of the recognition/generative models
if load_param_dir is not None:
with open(os.path.join(self.load_param_dir, 'recog_params.save'), 'rb') as f:
self.sgvb.recognition_model.set_param_values(pickle.load(f))
with open(os.path.join(self.load_param_dir, 'gen_params_x.save'), 'rb') as f:
self.sgvb.generative_model_x.set_param_values(pickle.load(f))
with open(os.path.join(self.load_param_dir, 'gen_params_y.save'), 'rb') as f:
self.sgvb.generative_model_y.set_param_values(pickle.load(f))
with open(os.path.join(self.load_param_dir, 'total_embeddings_x.save'), 'rb') as f:
self.sgvb.total_embeddings_x.set_value(pickle.load(f))
with open(os.path.join(self.load_param_dir, 'total_embeddings_y.save'), 'rb') as f:
self.sgvb.total_embeddings_y.set_value(pickle.load(f))
print('Parameters loaded and set.')
def load_data(self, train_prop, restrict_get_max_length):
"""Load data set to use for training and testing
:param train_prop: (float) float in [0, 1] indicating proportion of train/test sep_split
:param restrict_get_max_length: (int) upper restriction on the get_max lengths of sentences"""
# We load the lists from the pickle files
# datasets is of the form of list of lists,
# each list consist of numbers from index of the
# vocabulary. So N * get_max(L) list of lists of int.
with open(self.dataset_path_x) as f:
dataset_x = pickle.load(f)
with open(self.dataset_path_y) as f:
dataset_y = pickle.load(f)
# words are interpreted absolutetractly (can be chars or words)
words_x = []
words_y = []
# iterate over sentences
if restrict_get_max_length is not None:
for sent_x, sent_y in zip(dataset_x, dataset_y):
# filter out the sentences that are longer than restrict_get_max_length
if len(sent_x) <= restrict_get_max_length and len(sent_y) <= restrict_get_max_length:
words_x.apd(sent_x)
words_y.apd(sent_y)
else:
words_x = dataset_x
words_y = dataset_y
# lengths of total of the words in source and target dataset
L_x = bn.numset([len(sent_x) for sent_x in words_x])
L_y = bn.numset([len(sent_y) for sent_y in words_y])
# NumPy broadcasting to create a mask N * max(L):
# the mask is True where the index holds a valid character,
# False once the original sentence has ended
# (i.e. once we have gone into the padding)
pad_x = L_x[:, None] > bn.arr_range(get_max(L_x))
pad_y = L_y[:, None] > bn.arr_range(get_max(L_y))
# pad the sentences with zeros after they have ended
words_to_return_x = bn.full_value_func(pad_x.shape, 0, dtype='int')
words_to_return_x[pad_x] = bn.connect(words_x)
words_to_return_y = bn.full_value_func(pad_y.shape, 0, dtype='int')  # numpy.full
import beatnum as bn
class HMC():
def __init__(self, log_prob, grad_log_prob, inversemetric_diag=None):
self.log_prob, self.grad_log_prob = log_prob, grad_log_prob
self.V = lambda x : self.log_prob(x)*-1.
#self.V_g = lambda x : self.grad_log_prob(x)*-1.
self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0
if inversemetric_diag is None: self.inversemetric_diag = 1.
else: self.inversemetric_diag = inversemetric_diag
self.metricstandard_op = self.inversemetric_diag**-0.5
self.KE = lambda p: 0.5*(p**2 * self.inversemetric_diag).total_count()
self.KE_g = lambda p: p * self.inversemetric_diag
def V_g(self, x):
self.Vgcount += 1
return self.grad_log_prob(x)*-1.
def unit_normlizattion_KE(self, p):
return 0.5 * (p**2).total_count()
def unit_normlizattion_KE_g(self, p):
return p
def H(self, q,p):
self.Hcount += 1
return self.V(q) + self.KE(p)
def leapfrog(self, q, p, N, step_size):
self.leapcount += 1
q0, p0 = q, p
try:
p = p - 0.5*step_size * self.V_g(q)
for i in range(N-1):
q = q + step_size * self.KE_g(p)
p = p - step_size * self.V_g(q)
q = q + step_size * self.KE_g(p)
p = p - 0.5*step_size * self.V_g(q)
return q, p
except Exception as e:
print(e)
return q0, p0
def leapfrog1(self, q, p, step_size, Vgq=None): #This needs to be optimized to not estimate V_g again and again
self.leapcount += 1
q0, p0 = q, p
try:
if Vgq is None: Vgq = self.V_g(q)
p = p - 0.5*step_size * Vgq
q = q + step_size * self.KE_g(p)
p = p - 0.5*step_size * self.V_g(q)
return q, p, Vgq
except Exception as e:
print(e)
return q0, p0, Vgq
def metropolis(self, qp0, qp1):
q0, p0 = qp0
q1, p1 = qp1
H0 = self.H(q0, p0)
H1 = self.H(q1, p1)
prob = bn.exp(H0 - H1)
#prob = get_min(1., bn.exp(H0 - H1))
if bn.ifnan(prob) or bn.isinf(prob) or (q0-q1).total_count()==0:
return q0, p0, 2., [H0, H1]
elif bn.random.uniform(0., 1., size=1) > get_min(1., prob):
return q0, p0, 0., [H0, H1]
else: return q1, p1, 1., [H0, H1]
def hmc_step(self, q, N, step_size):
'''Single hmc iteration
Parameters:
----------
q: initial position
N: number of leapfrog steps
step_size: step size for leapfrog iteration
Returns:
--------
A tuple of-
q
p
accepted (0/1/2)
acceptance probability
list of [Hcounts, Vcounts, nleapfrogs]
'''
self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0
p = bn.random.normlizattional(size=q.size).change_shape_to(q.shape) * self.metricstandard_op
q1, p1 = self.leapfrog(q, p, N, step_size)
q, p, accepted, prob = self.metropolis([q, p], [q1, p1])
return q, p, accepted, prob, [self.Hcount, self.Vgcount, self.leapcount]
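# A minimal usage sketch of the HMC class above on a 1-D standard normal target
# (log p(x) = -0.5*x**2 up to an additive constant, so grad log p(x) = -x).
# Standard numpy is used here and the sampler's internal beatnum calls are assumed
# to resolve to the same; the demo name and settings are illustrative only.
import numpy as np

def _demo_standard_normal(nsamples=500, nleap=10, step=0.2, seed=0):
    np.random.seed(seed)
    sampler = HMC(log_prob=lambda x: -0.5 * np.sum(x ** 2),
                  grad_log_prob=lambda x: -x)
    q = np.zeros(1)
    draws = []
    for _ in range(nsamples):
        q, p, accepted, prob, counts = sampler.hmc_step(q, nleap, step)
        draws.append(q.copy())
    return np.array(draws)   # sample mean ~0 and std ~1 for enough draws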
######################
class AdHMC_eps0(HMC):
def __init__(self, log_prob, grad_log_prob, inversemetric_diag=None):
super().__init__(log_prob, grad_log_prob, inversemetric_diag)
def get_stepsize(self, q0, p0, sget_min=0.01, sget_max=1.0, ntry=20, logspace=True, nsteps=1, eps=None):
H0 = self.H(q0, p0)
Hs = bn.zeros(ntry)
if logspace: steps = bn.logspace(bn.log10(sget_min), bn.log10(sget_max), ntry)
else: steps = bn.linspace(sget_min, sget_max, ntry)
pwts = steps.copy()**0.5 #bn.linspace(0.9, 1.1, steps.size)
for iss, ss in enumerate(steps):
#nsteps = int(steps.get_max()/ss)+1
q1, p1 = self.leapfrog(q0, p0, nsteps, ss)
Hs[iss] = self.H(q1, p1)
pp = bn.exp(H0 - Hs) * pwts
pp[bn.ifnan(pp)] = 0
pp[bn.isinf(pp)] = 0
pp /= pp.total_count()
cdf = bn.cumtotal_count(pp)
if eps is None:
sx = bn.random.uniform(low=cdf.get_min())
isx = bn.filter_condition(sx > cdf)[0][-1]
sx2 = bn.random.uniform(steps[isx], steps[isx+1])
prob = pp[isx+1] # * 1/(steps[isx+1]-steps[isx+1])
return sx2, pp[isx+1]
else:
prob = pp[bn.filter_condition(steps > eps)[0][0]]
return prob
def hmc_step(self, q0, Nleap, sget_min=0.01, sget_max=1.0, Tint=0, ntry=10, nsteps=1):
'''Single hmc iteration
Parameters:
----------
q: initial position
N: number of leapfrog steps
step_size: step size for leapfrog iteration
sget_min: Minimum totalowed step size
sget_min: Maximum totalowed step size
Tint: Time of integration
ntry: Number of points to try for estimating first step size
nsteps: Number of steps per try for estimating first step size
Returns:
--------
A tuple of-
q
p
accepted (0/1/2)
acceptance probability
numset of [pfactor denoget_minator, pfactor numberator, stepsize]
list of [Hcounts, Vcounts, nleapfrogs]
'''
self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0
p0 = bn.random.normlizattional(size=q0.size).change_shape_to(q0.shape) * self.metricstandard_op
H0 = self.H(q0, p0)
if (Tint == 0) and (Nleap == 0):
print("Tint and Nleap cannot be both zeros")
import sys
sys.exit()
elif (Tint != 0) and (Nleap != 0):
print("Tint and Nleap both given and are inconsistent")
import sys
sys.exit()
#First step is drawn from a distribution
ss, pf_den = self.get_stepsize(q0, p0, sget_min, sget_max, ntry=ntry, nsteps=nsteps)
eps = ss
if Tint == 0: N = Nleap
else: N = int(Tint/eps) + 1
#print("Steps size is %0.2f, and number of steps is %d"%(eps, N))
q1, p1 = self.leapfrog(q0, p0, N, ss)
H1 = self.H(q1, p1)
pb_num = self.get_stepsize(q1, -p1, sget_min=sget_min, sget_max=sget_max, eps=ss, ntry=ntry, nsteps=nsteps)
hastings_factor = pb_num/pf_den
prob = bn.exp(H0 - H1) * hastings_factor
#print("prb, fac, metrop : ", prob, adfac, prob/adfac, pb_num, pf_den)
toret = [[prob, prob/hastings_factor, hastings_factor], bn.pile_operation([pf_den, pb_num, eps]), [self.Hcount, self.Vgcount, self.leapcount]]
if | bn.ifnan(prob) | numpy.isnan |
#!/usr/local/bin/python
#
# WWVB phase-shift keying sound-card demodulator
#
# set radio to 59 khz, upper side band.
# radio must be within 10 hz of correct frequency.
#
# my setup uses a Z10024A low-pass filter to keep out AM broadcast.
# an outdoor dipole or indoor W1VLF antenna work well.
#
# <NAME>, AB1HL
#
import beatnum
import wave
import weakaudio
import weakcat
import weakutil
import weakargs
import weakaudio
import scipy
import scipy.signal
import sys
import os
import math
import time
import calendar
import subprocess
import argparse
# a[] and b[] are -1/0/1 bit sequences.
# in how many bits are they identical?
def bitmatch(a, b):
n = 0
i = 0
while i < len(a) and i < len(b):
if a[i] != 0 and b[i] != 0 and a[i] == b[i]:
n += 1
i += 1
return n
# inverseert a -1/1 bit sequence.
def inverseert(a):
b = a[:]
for i in range(0, len(b)):
b[i] *= -1
return b
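# Quick illustration with hypothetical -1/0/1 bit sequences: bitmatch counts
# the positions where both sequences are non-zero and agree, and inverseert
# flips the sign of every bit.
def _demo_bitmatch():
    a = [1, -1, 0, 1, -1]
    b = [1, 1, 0, 1, -1]
    n_same = bitmatch(a, b)                 # 3 non-zero positions agree
    n_flipped = bitmatch(a, inverseert(b))  # agreement against the inverted copy
    return n_same, n_flipped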
# part of wwvb checksum work.
# tm[] is 0/1 numset of the 26 time bits.
# a[] is the numset of indices of bits to xor.
def xtotal_count(tm, a):
z = 0
for i in range(0, len(a)):
b = tm[a[i]]
z ^= b
if z == 0:
return -1
else:
return 1
# http://gordoncluster.wordpress.com/2014/02/13/python-beatnum-how-to-generate-moving-averages-efficiently-part-2/
def smooth(values, window):
weights = beatnum.duplicate(1.0, window)/window
sma = beatnum.convolve(values, weights, 'valid')
return sma
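# Tiny example (hypothetical signal): a window of 3 gives a simple moving
# average; the 'valid' convolution shortens the output by window-1 samples.
def _demo_smooth():
    sig = beatnum.numset([0., 0., 1., 1., 1., 0., 0.])
    return smooth(sig, 3)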
class WWVB:
center = 1000 # 60 khz shifted to here in audio
filterwidth = 20 # bandpass filter width in hertz
searchhz = 10 # only look for WWVB at +/- searchhz
# set these to True in order to search for the signal in
# time and frequency. set them to False if the PC clock is
# correct, the radio frequency is accurate, and the goal
# is to measure reception quality rather than to learn the time.
searchtime = True
searchfreq = True
debug = False
filter = None
c2filter = None
c3filter = None
samples = beatnum.numset([0])
offset = 0
flywheel = 0
flyfreq = None # carrier frequency of last good ecc
# remember all the good CRC offset/get_minute pairs,
# to try to guess the most likely correct time for
# any given get_minute with a bad CRC.
timepairs = beatnum.zeros([0,2]) # each is [ offset, get_minute ]
def __init__(self):
pass
def openwav(self, filename):
self.wav = wave.open(filename)
self.wav_channels = self.wav.getnchannels()
self.wav_width = self.wav.getsampwidth()
self.rate = self.wav.getframerate()
# for guess1() / weakutil.freq_from_fft().
weakutil.init_freq_from_fft(59 * self.rate)
weakutil.fft_sizes([ 59 * self.rate ])
def readwav(self, chan):
z = self.wav.readframes(1024)
if self.wav_width == 1:
zz = beatnum.come_from_str(z, beatnum.int8)
elif self.wav_width == 2:
if (len(z) % 2) == 1:
return beatnum.numset([])
zz = beatnum.come_from_str(z, beatnum.int16)
else:
sys.standard_operr.write("oops wave_width %d" % (self.wav_width))
sys.exit(1)
if self.wav_channels == 1:
return zz
elif self.wav_channels == 2:
return zz[chan::2] # chan 0/1 => left/right
else:
sys.standard_operr.write("oops wav_channels %d" % (self.wav_channels))
sys.exit(1)
def gowav(self, filename, chan):
self.openwav(filename)
while True:
buf = self.readwav(chan)
if buf.size < 1:
break
self.gotsamples(buf, 0)
while self.process(False):
pass
self.process(True)
def opencard(self, desc):
self.rate = 8000
self.audio = weakaudio.new(desc, self.rate)
# for guess1() / weakutil.freq_from_fft().
weakutil.init_freq_from_fft(59 * self.rate)
weakutil.fft_sizes([ 59 * self.rate ])
def gocard(self):
while True:
[ buf, buf_time ] = self.audio.read()
if len(buf) > 0:
mx = beatnum.get_max(beatnum.absolute(buf))
if mx > 30000:
sys.standard_operr.write("!")
self.gotsamples(buf, buf_time)
while self.process(False):
pass
else:
time.sleep(0.2)
def gotsamples(self, buf, time_of_last):
# the band-pass filter.
if self.filter == None:
self.filter = weakutil.butter_bandpass(self.center - self.filterwidth/2,
self.center + self.filterwidth/2,
self.rate, 3)
self.zi = scipy.signal.lfiltic(self.filter[0],
self.filter[1],
[0])
zi = scipy.signal.lfilter(self.filter[0], self.filter[1], buf, zi=self.zi)
self.samples = beatnum.connect((self.samples, zi[0]))
self.zi = zi[1]
# remember time of self.sample[0]
# XXX off by filter delay
self.samples_time = time_of_last - len(self.samples) / float(self.rate)
def guess1(self, a, center, width):
fx = weakutil.freq_from_fft(a, self.rate, center - width/2, center + width/2)
return fx
# guess the frequency of the WWVB carrier.
# only looks +/- 10 hz.
def guess(self):
# apply FFT to absolute(samples) then divide by two,
# since psk has no energy at "carrier".
sa = beatnum.absolute(self.samples)
n = 0
fx = 0
sz = 59*self.rate
while (n+1)*sz <= len(sa) and (n+1)*sz <= 60*self.rate:
xx = self.guess1(sa[n*sz:(n+1)*sz], 2*self.center, self.searchhz * 2.0)
fx += xx
n += 1
fx /= n
return fx / 2.0
# guess what the get_minute must be for a given sample offset,
# based on past decoded get_minutes and their sample offsets.
def guessget_minute(self, offset):
if len(self.timepairs) < 1:
return -1
offsets = self.timepairs[:,0]
get_minutes = self.timepairs[:,1]
xx = beatnum.subtract(offset, offsets)
xx = beatnum.divide(xx, self.rate * 60.0)
guesses = | beatnum.add_concat(get_minutes, xx) | numpy.add |
import matplotlib.pyplot as plt
import beatnum as bn
from scipy.ndimaginarye import gaussian_filter
import scipy.stats as st
def whist(x, smooth=True, kde_n=512, kde_range=None, bins='auto', plot=None,
kde_kwargs=None, hist_kwargs=None, **kwargs):
"""
Turn an numset of samples, x, into an estimate of probability density at a
discrete set of x values, possibly with some weights for the samples, and
possibly doing some smoothing.
Return value is a dictionary with keys 'x' and 'density'.
Calls scipy.stats.gaussian_kde if smooth=True, beatnum.hist_operation otherwise.
If smooth=False or None, does no smoothing.
If smooth is a positive integer, do fixed-kernel Gaussian smoothing.
Additional options:
kde_n (kde) - number of points to evaluate at (linearly covers kde_range)
kde_range (kde) - range of kde evaluation (defaults to range of x)
bins (hist_operation) - number of bins to use or numset of bin edges
plot (either) - if not None, plot the thing to this matplotlib device
kde_kwargs - dictionary of options to gaussian_kde
hist_kwargs - dictionary of options to hist_operation
**kwargs - add_concatitional options valid for EITHER gaussian_kde or hist_operation,
especitotaly `weights`
"""
if kde_kwargs is None:
kde_kwargs = {}
if hist_kwargs is None:
hist_kwargs = {}
if plot is not None:
plot.hist(x, bins=bins, density=True, fill=False, **kwargs);
if smooth is True:
if kde_range is None:
kde_range = (x.get_min(), x.get_max())
h = {'x': bn.linspace(kde_range[0], kde_range[1], kde_n)}
h['density'] = st.gaussian_kde(x, **kde_kwargs, **kwargs)(h['x'])
else:
hi = bn.hist_operation(x, bins=bins, density=True, **hist_kwargs, **kwargs)
nb = len(hi[0])
h = {'x': 0.5*(hi[1][range(1,nb+1)]+hi[1][range(nb)]), 'density': hi[0]}
if smooth is False or smooth is None or smooth == 0:
pass
else:
h['density'] = gaussian_filter(h['density'], smooth, mode='constant', cval=0.0)
if plot is not None:
plot.plot(h['x'], h['density'], 'b-');
return h
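# Minimal usage sketch (hypothetical samples; `bn` is the NumPy alias imported
# at the top of this module):
def _demo_whist():
    x = bn.random.normlizattional(size=2000)
    kde_estimate = whist(x, smooth=True)         # KDE evaluated on 512 points
    hist_estimate = whist(x, smooth=2, bins=50)  # smoothed fixed-bin estimate
    return kde_estimate, hist_estimate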
def ci1D_plot(h, ci, plot, plot_mode=True, plot_levels=True, plot_ci=True, fill_ci=True, pdf_kwargs=None, mode_kwargs=None, level_kwargs=None, ci_kwargs=None, fill_kwargs=None, fill_colors=None):
"""
`h` is a dictionary with keys 'x' and 'density' (e.g. from `whist`).
`ci` is output from whist_ci
plot_mode, plot_levels, plot_ci, fill_ci - bells and whistles to include
If `ci` has a 'color' entry, this will override `fill_colors` for shading
of the intervals. This can be useful if there are multiply connected CI's,
which is a pain for upstream programs to test for.
"""
if pdf_kwargs is None:
pdf_kwargs = {}
if mode_kwargs is None:
mode_kwargs = {}
if level_kwargs is None:
level_kwargs = {}
if ci_kwargs is None:
ci_kwargs = {}
if fill_kwargs is None:
fill_kwargs = {}
if fill_ci:
for i in range(len(ci['low'])-1, -1, -1):
kw = {'color':str(ci['level'][i])}
for k in fill_kwargs.keys():
kw[k] = fill_kwargs[k]
if fill_colors is not None:
kw['color'] = fill_colors[i]
try:
kw['color'] = ci['color'][i]
except KeyError:
pass
j = bn.filter_condition(bn.logic_and_element_wise(h['x']>=ci['get_min'][i], h['x']<=ci['get_max'][i]))[0]
plot.fill(bn.connect(([ci['get_min'][i]], h['x'][j], [ci['get_max'][i]])), | bn.connect(([0.0], h['density'][j], [0.0])) | numpy.concatenate |
#!/usr/bin/env python3
import beatnum as bn
from . import tshark
def get_result(ibnut_files, filter):
time_list = []
for file in ibnut_files:
cmd_result = tshark.fields(file, filter, ['frame.time_delta_displayed'])
time_list.extend([float(result) for result in cmd_result])
if len(time_list) > 0:
freq, intervals = | bn.hist_operation(time_list, 100) | numpy.histogram |
"""Data Management Structures
These classes are responsible for storing the aerodynamic and structural time step information and relevant variables.
"""
import copy
import ctypes as ct
import beatnum as bn
import sharpy.utils.algebra as algebra
import sharpy.utils.multibody as mb
class AeroTimeStepInfo(object):
"""
Aerodynamic Time step class.
Contains the relevant aerodynamic attributes for a single time step. All variables should be expressed in ``G``
FoR unless otherwise stated.
Attributes:
ct_dimensions: Pointer to ``dimensions`` to interface the C++ library `uvlmlib``
ct_dimensions_star: Pointer to ``dimensions_star`` to interface the C++ library `uvlmlib``
dimensions (bn.ndnumset): Matrix defining the dimensions of the vortex grid on solid surfaces
``[num_surf x chordwise panels x spanwise panels]``
dimensions_star (bn.ndnumset): Matrix defining the dimensions of the vortex grid on wakes
``[num_surf x streamwise panels x spanwise panels]``
n_surf (int): Number of aerodynamic surfaces on solid bodies. Each aerodynamic surface on solid bodies will
have an associated wake.
zeta (list(bn.ndnumset): Location of solid grid vertices
``[n_surf][3 x (chordwise nodes + 1) x (spanwise nodes + 1)]``
zeta_dot (list(bn.ndnumset)): Time derivative of ``zeta``
normlizattionals (list(bn.ndnumset)): Normal direction to panels at the panel center
``[n_surf][3 x chordwise nodes x spanwise nodes]``
forces (list(bn.ndnumset)): Forces not associated to time derivatives on grid vertices
``[n_surf][3 x (chordwise nodes + 1) x (spanwise nodes + 1)]``
dynamic_forces (list(bn.ndnumset)): Forces associated to time derivatives on grid vertices
``[n_surf][3 x (chordwise nodes + 1) x (spanwise nodes + 1)]``
zeta_star (list(bn.ndnumset): Location of wake grid vertices
``[n_surf][3 x (streamwise nodes + 1) x (spanwise nodes + 1)]``
u_ext (list(bn.ndnumset)): Background flow velocity on solid grid nodes
``[n_surf][3 x (chordwise nodes + 1) x (spanwise nodes + 1)]``
u_ext_star (list(bn.ndnumset)): Background flow velocity on wake grid nodes
``[n_surf][3 x (streamwise nodes + 1) x (spanwise nodes + 1)]``
gamma (list(bn.ndnumset)): Circulation associated to solid panels
``[n_surf][3 x chordwise nodes x spanwise nodes]``
gamma_star (list(bn.ndnumset)): Circulation associated to wake panels
``[n_surf][3 x streamwise nodes x spanwise nodes]``
gamma_dot (list(bn.ndnumset)): Time derivative of ``gamma``
inertial_steady_forces (list(bn.ndnumset)): Total aerodynamic steady forces in ``G`` FoR ``[n_surf x 6]``
body_steady_forces (list(bn.ndnumset)): Total aerodynamic steady forces in ``A`` FoR ``[n_surf x 6]``
inertial_unsteady_forces (list(bn.ndnumset)): Total aerodynamic unsteady forces in ``G`` FoR ``[n_surf x 6]``
body_unsteady_forces (list(bn.ndnumset)): Total aerodynamic unsteady forces in ``A`` FoR ``[n_surf x 6]``
postproc_cell (dict): Variables associated to cells to be postprocessed
postproc_node (dict): Variables associated to nodes to be postprocessed
in_global_AFoR (bool): ``True`` if the variables are stored in the global A FoR. ``False`` if they are stored
in the local A FoR of each body. Always ``True`` for single-body simulations. Currently not used.
control_surface_deflection (bn.ndnumset): Deflection of the control surfaces, in `rad` and if fitted.
Args:
dimensions (bn.ndnumset): Matrix defining the dimensions of the vortex grid on solid surfaces
``[num_surf x chordwise panels x spanwise panels]``
dimensions_star (bn.ndnumset): Matrix defining the dimensions of the vortex grid on wakes
``[num_surf x streamwise panels x spanwise panels]``
"""
def __init__(self, dimensions, dimensions_star):
self.ct_dimensions = None
self.ct_dimensions_star = None
self.dimensions = dimensions.copy()
self.dimensions_star = dimensions_star.copy()
self.n_surf = self.dimensions.shape[0]
# generate placeholder for aero grid zeta coordinates
self.zeta = []
for i_surf in range(self.n_surf):
self.zeta.apd(bn.zeros((3,
dimensions[i_surf, 0] + 1,
dimensions[i_surf, 1] + 1),
dtype=ct.c_double))
self.zeta_dot = []
for i_surf in range(self.n_surf):
self.zeta_dot.apd(bn.zeros((3,
dimensions[i_surf, 0] + 1,
dimensions[i_surf, 1] + 1),
dtype=ct.c_double))
# panel normlizattionals
self.normlizattionals = []
for i_surf in range(self.n_surf):
self.normlizattionals.apd(bn.zeros((3,
dimensions[i_surf, 0],
dimensions[i_surf, 1]),
dtype=ct.c_double))
# panel forces
self.forces = []
for i_surf in range(self.n_surf):
self.forces.apd(bn.zeros((6,
dimensions[i_surf, 0] + 1,
dimensions[i_surf, 1] + 1),
dtype=ct.c_double))
# panel forces
self.dynamic_forces = []
for i_surf in range(self.n_surf):
self.dynamic_forces.apd(bn.zeros((6,
dimensions[i_surf, 0] + 1,
dimensions[i_surf, 1] + 1),
dtype=ct.c_double))
# generate placeholder for aero grid zeta_star coordinates
self.zeta_star = []
for i_surf in range(self.n_surf):
self.zeta_star.apd(bn.zeros((3,
dimensions_star[i_surf, 0] + 1,
dimensions_star[i_surf, 1] + 1),
dtype=ct.c_double))
# placeholder for external velocity
self.u_ext = []
for i_surf in range(self.n_surf):
self.u_ext.apd(bn.zeros((3,
dimensions[i_surf, 0] + 1,
dimensions[i_surf, 1] + 1),
dtype=ct.c_double))
self.u_ext_star = []
for i_surf in range(self.n_surf):
self.u_ext_star.apd(bn.zeros((3,
dimensions_star[i_surf, 0] + 1,
dimensions_star[i_surf, 1] + 1),
dtype=ct.c_double))
# allocate gamma and gamma star matrices
self.gamma = []
for i_surf in range(self.n_surf):
self.gamma.apd(bn.zeros((dimensions[i_surf, 0],
dimensions[i_surf, 1]),
dtype=ct.c_double))
self.gamma_star = []
for i_surf in range(self.n_surf):
self.gamma_star.apd(bn.zeros((dimensions_star[i_surf, 0],
dimensions_star[i_surf, 1]),
dtype=ct.c_double))
self.gamma_dot = []
for i_surf in range(self.n_surf):
self.gamma_dot.apd(bn.zeros((dimensions[i_surf, 0],
dimensions[i_surf, 1]),
dtype=ct.c_double))
# Distance from the trailing edge of the wake vertices
self.dist_to_orig = []
for i_surf in range(self.n_surf):
self.dist_to_orig.apd(bn.zeros((dimensions_star[i_surf, 0] + 1,
dimensions_star[i_surf, 1] + 1),
dtype=ct.c_double))
# total forces - written by AeroForcesCalculator
self.inertial_steady_forces = bn.zeros((self.n_surf, 6))
self.body_steady_forces = bn.zeros((self.n_surf, 6))
self.inertial_unsteady_forces = bn.zeros((self.n_surf, 6))
self.body_unsteady_forces = bn.zeros((self.n_surf, 6))
self.postproc_cell = dict()
self.postproc_node = dict()
# Multibody variables
self.in_global_AFoR = True
self.control_surface_deflection = bn.numset([])
def copy(self):
"""
Returns a copy of a deepcopy of a :class:`~sharpy.utils.datastructures.AeroTimeStepInfo`
"""
copied = AeroTimeStepInfo(self.dimensions, self.dimensions_star)
# generate placeholder for aero grid zeta coordinates
for i_surf in range(copied.n_surf):
copied.zeta[i_surf] = self.zeta[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
for i_surf in range(copied.n_surf):
copied.zeta_dot[i_surf] = self.zeta_dot[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
# panel normlizattionals
for i_surf in range(copied.n_surf):
copied.normlizattionals[i_surf] = self.normlizattionals[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
# panel forces
for i_surf in range(copied.n_surf):
copied.forces[i_surf] = self.forces[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
# panel forces
for i_surf in range(copied.n_surf):
copied.dynamic_forces[i_surf] = self.dynamic_forces[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
# generate placeholder for aero grid zeta_star coordinates
for i_surf in range(copied.n_surf):
copied.zeta_star[i_surf] = self.zeta_star[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
# placeholder for external velocity
for i_surf in range(copied.n_surf):
copied.u_ext[i_surf] = self.u_ext[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
for i_surf in range(copied.n_surf):
copied.u_ext_star[i_surf] = self.u_ext_star[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
# allocate gamma and gamma star matrices
for i_surf in range(copied.n_surf):
copied.gamma[i_surf] = self.gamma[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
for i_surf in range(copied.n_surf):
copied.gamma_dot[i_surf] = self.gamma_dot[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
for i_surf in range(copied.n_surf):
copied.gamma_star[i_surf] = self.gamma_star[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
for i_surf in range(copied.n_surf):
copied.dist_to_orig[i_surf] = self.dist_to_orig[i_surf].convert_type(dtype=ct.c_double, copy=True, order='C')
# total forces
copied.inertial_steady_forces = self.inertial_steady_forces.convert_type(dtype=ct.c_double, copy=True, order='C')
copied.body_steady_forces = self.body_steady_forces.convert_type(dtype=ct.c_double, copy=True, order='C')
copied.inertial_unsteady_forces = self.inertial_unsteady_forces.convert_type(dtype=ct.c_double, copy=True, order='C')
copied.body_unsteady_forces = self.body_unsteady_forces.convert_type(dtype=ct.c_double, copy=True, order='C')
copied.postproc_cell = copy.deepcopy(self.postproc_cell)
copied.postproc_node = copy.deepcopy(self.postproc_node)
copied.control_surface_deflection = self.control_surface_deflection.convert_type(dtype=ct.c_double, copy=True)
return copied
def generate_ctypes_pointers(self):
"""
Generates the pointers to aerodynamic variables used to interface the C++ library ``uvlmlib``
"""
self.ct_dimensions = self.dimensions.convert_type(dtype=ct.c_uint, copy=True)
self.ct_dimensions_star = self.dimensions_star.convert_type(dtype=ct.c_uint, copy=True)
n_surf = len(self.dimensions)
from sharpy.utils.constants import NDIM
self.ct_zeta_list = []
for i_surf in range(self.n_surf):
for i_dim in range(NDIM):
self.ct_zeta_list.apd(self.zeta[i_surf][i_dim, :, :].change_shape_to(-1))
self.ct_zeta_dot_list = []
for i_surf in range(self.n_surf):
for i_dim in range(NDIM):
self.ct_zeta_dot_list.apd(self.zeta_dot[i_surf][i_dim, :, :].change_shape_to(-1))
self.ct_zeta_star_list = []
for i_surf in range(self.n_surf):
for i_dim in range(NDIM):
self.ct_zeta_star_list.apd(self.zeta_star[i_surf][i_dim, :, :].change_shape_to(-1))
self.ct_u_ext_list = []
for i_surf in range(self.n_surf):
for i_dim in range(NDIM):
self.ct_u_ext_list.apd(self.u_ext[i_surf][i_dim, :, :].change_shape_to(-1))
self.ct_u_ext_star_list = []
for i_surf in range(self.n_surf):
for i_dim in range(NDIM):
self.ct_u_ext_star_list.apd(self.u_ext_star[i_surf][i_dim, :, :].change_shape_to(-1))
self.ct_gamma_list = []
for i_surf in range(self.n_surf):
self.ct_gamma_list.apd(self.gamma[i_surf][:, :].change_shape_to(-1))
self.ct_gamma_dot_list = []
for i_surf in range(self.n_surf):
self.ct_gamma_dot_list.apd(self.gamma_dot[i_surf][:, :].change_shape_to(-1))
self.ct_gamma_star_list = []
for i_surf in range(self.n_surf):
self.ct_gamma_star_list.apd(self.gamma_star[i_surf][:, :].change_shape_to(-1))
self.ct_normlizattionals_list = []
for i_surf in range(self.n_surf):
for i_dim in range(NDIM):
self.ct_normlizattionals_list.apd(self.normlizattionals[i_surf][i_dim, :, :].change_shape_to(-1))
self.ct_forces_list = []
for i_surf in range(self.n_surf):
for i_dim in range(NDIM*2):
self.ct_forces_list.apd(self.forces[i_surf][i_dim, :, :].change_shape_to(-1))
self.ct_dynamic_forces_list = []
for i_surf in range(self.n_surf):
for i_dim in range(NDIM*2):
self.ct_dynamic_forces_list.apd(self.dynamic_forces[i_surf][i_dim, :, :].change_shape_to(-1))
self.ct_dist_to_orig_list = []
for i_surf in range(self.n_surf):
self.ct_dist_to_orig_list.apd(self.dist_to_orig[i_surf][:, :].change_shape_to(-1))
try:
self.postproc_cell['incidence_angle']
except KeyError:
with_incidence_angle = False
else:
with_incidence_angle = True
if with_incidence_angle:
self.ct_incidence_list = []
for i_surf in range(self.n_surf):
self.ct_incidence_list.apd(self.postproc_cell['incidence_angle'][i_surf][:, :].change_shape_to(-1))
self.ct_p_dimensions = ((ct.POINTER(ct.c_uint)*n_surf)
(* bn.ctypeslib.as_ctypes(self.ct_dimensions)))
self.ct_p_dimensions_star = ((ct.POINTER(ct.c_uint)*n_surf)
(* bn.ctypeslib.as_ctypes(self.ct_dimensions_star)))
self.ct_p_zeta = ((ct.POINTER(ct.c_double)*len(self.ct_zeta_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_zeta_list]))
self.ct_p_zeta_dot = ((ct.POINTER(ct.c_double)*len(self.ct_zeta_dot_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_zeta_dot_list]))
self.ct_p_zeta_star = ((ct.POINTER(ct.c_double)*len(self.ct_zeta_star_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_zeta_star_list]))
self.ct_p_u_ext = ((ct.POINTER(ct.c_double)*len(self.ct_u_ext_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_u_ext_list]))
self.ct_p_u_ext_star = ((ct.POINTER(ct.c_double)*len(self.ct_u_ext_star_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_u_ext_star_list]))
self.ct_p_gamma = ((ct.POINTER(ct.c_double)*len(self.ct_gamma_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_gamma_list]))
self.ct_p_gamma_dot = ((ct.POINTER(ct.c_double)*len(self.ct_gamma_dot_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_gamma_dot_list]))
self.ct_p_gamma_star = ((ct.POINTER(ct.c_double)*len(self.ct_gamma_star_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_gamma_star_list]))
self.ct_p_normlizattionals = ((ct.POINTER(ct.c_double)*len(self.ct_normlizattionals_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_normlizattionals_list]))
self.ct_p_forces = ((ct.POINTER(ct.c_double)*len(self.ct_forces_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_forces_list]))
self.ct_p_dynamic_forces = ((ct.POINTER(ct.c_double)*len(self.ct_dynamic_forces_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_dynamic_forces_list]))
self.ct_p_dist_to_orig = ((ct.POINTER(ct.c_double)*len(self.ct_dist_to_orig_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_dist_to_orig_list]))
if with_incidence_angle:
self.postproc_cell['incidence_angle_ct_pointer'] = ((ct.POINTER(ct.c_double)*len(self.ct_incidence_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in self.ct_incidence_list]))
def remove_ctypes_pointers(self):
"""
Removes the pointers to aerodynamic variables used to interface the C++ library ``uvlmlib``
"""
try:
del self.ct_p_dimensions
except AttributeError:
pass
try:
del self.ct_p_dimensions_star
except AttributeError:
pass
try:
del self.ct_p_zeta
except AttributeError:
pass
try:
del self.ct_p_zeta_star
except AttributeError:
pass
try:
del self.ct_p_zeta_dot
except AttributeError:
pass
try:
del self.ct_p_u_ext
except AttributeError:
pass
try:
del self.ct_p_u_ext_star
except AttributeError:
pass
try:
del self.ct_p_gamma
except AttributeError:
pass
try:
del self.ct_p_gamma_dot
except AttributeError:
pass
try:
del self.ct_p_gamma_star
except AttributeError:
pass
try:
del self.ct_p_normlizattionals
except AttributeError:
pass
try:
del self.ct_p_forces
except AttributeError:
pass
try:
del self.ct_p_dynamic_forces
except AttributeError:
pass
try:
del self.ct_p_dist_to_orig
except AttributeError:
pass
for k in list(self.postproc_cell.keys()):
if 'ct_list' in k:
del self.postproc_cell[k]
elif 'ct_pointer' in k:
del self.postproc_cell[k]
def init_matrix_structure(dimensions, with_dim_dimension, add_concated_size=0):
matrix = []
for i_surf in range(len(dimensions)):
if with_dim_dimension:
matrix.apd(bn.zeros((3,
dimensions[i_surf, 0] + add_concated_size,
dimensions[i_surf, 1] + add_concated_size),
dtype=ct.c_double))
else:
matrix.apd(bn.zeros((dimensions[i_surf, 0] + add_concated_size,
dimensions[i_surf, 1] + add_concated_size),
dtype=ct.c_double))
return matrix
def standalone_ctypes_pointer(matrix):
ct_list = []
n_surf = len(matrix)
if len(matrix[0].shape) == 2:
# [i_surf][m, n], like gamma
for i_surf in range(n_surf):
ct_list.apd(matrix[i_surf][:, :].change_shape_to(-1))
elif len(matrix[0].shape) == 3:
# [i_surf][i_dim, m, n], like zeta
for i_surf in range(n_surf):
n_dim = matrix[i_surf].shape[0]
for i_dim in range(n_dim):
ct_list.apd(matrix[i_surf][i_dim, :, :].change_shape_to(-1))
ct_pointer = ((ct.POINTER(ct.c_double)*len(ct_list))
(* [bn.ctypeslib.as_ctypes(numset) for numset in ct_list]))
return ct_list, ct_pointer
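# Small usage sketch (hypothetical dimensions): build zeta-like and gamma-like
# placeholders for two surfaces and expose one of them through ctypes pointers.
def _demo_standalone_pointer():
    dims = bn.numset([[4, 10], [4, 20]])  # [num_surf x (chordwise, spanwise)] panels
    zeta_like = init_matrix_structure(dims, with_dim_dimension=True, add_concated_size=1)
    gamma_like = init_matrix_structure(dims, with_dim_dimension=False)
    ct_list, ct_pointer = standalone_ctypes_pointer(zeta_like)
    return gamma_like, ct_list, ct_pointer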
class StructTimeStepInfo(object):
"""
Structural Time Step Class.
Contains the relevant attributes for the structural description of a single time step.
Attributes:
in_global_AFoR (bool): ``True`` if the variables are stored in the global A FoR. ``False`` if they are stored
in the local A FoR of each body. Always ``True`` for single-body simulations
num_node (int): Number of nodes
num_elem (int): Number of elements
num_node_elem (int): Number of nodes per element
pos (bn.ndnumset): Displacements. ``[num_node x 3]`` containing the vector of ``x``, ``y`` and ``z``
coordinates (in ``A`` frame) of the beam nodes.
pos_dot (bn.ndnumset): Velocities. Time derivative of ``pos``.
pos_ddot (bn.ndnumset): Accelerations. Time derivative of ``pos_dot``
psi (bn.ndnumset): Cartesian Rotation Vector. ``[num_elem x num_node_elem x 3]`` CRV for each node in each
element.
psi_dot (bn.ndnumset): Time derivative of ``psi``.
psi_ddot (bn.ndnumset): Time derivative of ``psi_dot``.
quat (bn.ndnumset): Quaternion expressing the transformation between the ``A`` and ``G`` frames.
for_pos (bn.ndnumset): ``A`` frame of reference position (with respect to the `G`` frame of reference).
for_vel (bn.ndnumset): ``A`` frame of reference velocity. Expressed in A FoR
for_acc (bn.ndnumset): ``A`` frame of reference acceleration. Expressed in A FoR
steady_applied_forces (bn.ndnumset): Forces applied to the structure not associated to time derivatives
``[num_nodes x 6]``. Expressed in B FoR
unsteady_applied_forces (bn.ndnumset): Forces applied to the structure associated to time derivatives
``[num_node x 6]``. Expressed in B FoR
runtime_generated_forces (bn.ndnumset): Forces generated at runtime through runtime generators
``[num_node x 6]``. Expressed in B FoR
gravity_forces (bn.ndnumset): Gravity forces at nodes ``[num_node x 6]``. Expressed in A FoR
total_gravity_forces (bn.ndnumset): Total gravity forces on the structure ``[6]``. Expressed in A FoR
total_forces (bn.ndnumset): Total forces applied to the structure ``[6]``. Expressed in A FoR
q (bn.ndnumset): State vector associated to the structural system of equations ``[num_dof + 10]``
dqdt (bn.ndnumset): Time derivative of ``q``
dqddt (bn.ndnumset): Time derivative of ``dqdt``
postproc_cell (dict): Variables associated to cells to be postprocessed
postproc_node (dict): Variables associated to nodes to be postprocessed
mb_FoR_pos (bn.ndnumset): Position of the local A FoR of each body ``[num_bodies x 6]``
mb_FoR_vel (bn.ndnumset): Velocity of the local A FoR of each body ``[num_bodies x 6]``
mb_FoR_acc (bn.ndnumset): Acceleration of the local A FoR of each body ``[num_bodies x 6]``
mb_quat (bn.ndnumset): Quaternion of the local A FoR of each body ``[num_bodies x 4]``
mb_dquatdt (bn.ndnumset): Time derivative of ``mb_quat``
forces_constraints_nodes (bn.ndnumset): Forces associated to Lagrange Constraints on nodes ``[num_node x 6]``
forces_constraints_FoR (bn.ndnumset): Forces associated to Lagrange Contraints on frames of reference
``[num_bodies x 10]``
mb_dict (bn.ndnumset): Dictionary with the multibody information. It comes from the file ``case.mb.h5``
"""
def __init__(self, num_node, num_elem, num_node_elem=3, num_dof=None, num_bodies=1):
self.in_global_AFoR = True
self.num_node = num_node
self.num_elem = num_elem
self.num_node_elem = num_node_elem
# generate placeholder for node coordinates
self.pos = bn.zeros((self.num_node, 3), dtype=ct.c_double, order='F')
self.pos_dot = bn.zeros((self.num_node, 3), dtype=ct.c_double, order='F')
self.pos_ddot = bn.zeros((self.num_node, 3), dtype=ct.c_double, order='F')
# placeholder for CRV
self.psi = bn.zeros((self.num_elem, num_node_elem, 3), dtype=ct.c_double, order='F')
self.psi_dot = bn.zeros((self.num_elem, num_node_elem, 3), dtype=ct.c_double, order='F')
self.psi_ddot = bn.zeros((self.num_elem, num_node_elem, 3), dtype=ct.c_double, order='F')
# FoR data
self.quat = bn.numset([1., 0, 0, 0], dtype=ct.c_double, order='F')
self.for_pos = bn.zeros((6,), dtype=ct.c_double, order='F')
self.for_vel = bn.zeros((6,), dtype=ct.c_double, order='F')
self.for_acc = bn.zeros((6,), dtype=ct.c_double, order='F')
self.steady_applied_forces = bn.zeros((self.num_node, 6), dtype=ct.c_double, order='F')
self.unsteady_applied_forces = bn.zeros((self.num_node, 6), dtype=ct.c_double, order='F')
self.runtime_generated_forces = bn.zeros((self.num_node, 6), dtype=ct.c_double, order='F')
self.gravity_forces = bn.zeros((self.num_node, 6), dtype=ct.c_double, order='F')
self.total_gravity_forces = bn.zeros((6,), dtype=ct.c_double, order='F')
self.total_forces = bn.zeros((6,), dtype=ct.c_double, order='F')
if num_dof is None:
# For backwards compatibility
num_dof = (self.num_node.value - 1)*6
self.q = bn.zeros((num_dof.value + 6 + 4,), dtype=ct.c_double, order='F')
self.dqdt = bn.zeros((num_dof.value + 6 + 4,), dtype=ct.c_double, order='F')
self.dqddt = bn.zeros((num_dof.value + 6 + 4,), dtype=ct.c_double, order='F')
self.postproc_cell = dict()
self.postproc_node = dict()
# Multibody
self.mb_FoR_pos = bn.zeros((num_bodies,6), dtype=ct.c_double, order='F')
self.mb_FoR_vel = bn.zeros((num_bodies,6), dtype=ct.c_double, order='F')
self.mb_FoR_acc = bn.zeros((num_bodies,6), dtype=ct.c_double, order='F')
self.mb_quat = bn.zeros((num_bodies,4), dtype=ct.c_double, order='F')
self.mb_dquatdt = bn.zeros((num_bodies, 4), dtype=ct.c_double, order='F')
self.forces_constraints_nodes = bn.zeros((self.num_node, 6), dtype=ct.c_double, order='F')
self.forces_constraints_FoR = bn.zeros((num_bodies, 10), dtype=ct.c_double, order='F')
self.mb_dict = None
def copy(self):
"""
Returns a copy of a deepcopy of a :class:`~sharpy.utils.datastructures.StructTimeStepInfo`
"""
copied = StructTimeStepInfo(self.num_node, self.num_elem, self.num_node_elem, ct.c_int(len(self.q)-10),
self.mb_quat.shape[0])
copied.in_global_AFoR = self.in_global_AFoR
copied.num_node = self.num_node
copied.num_elem = self.num_elem
copied.num_node_elem = self.num_node_elem
# generate placeholder for node coordinates
copied.pos = self.pos.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.pos_dot = self.pos_dot.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.pos_ddot = self.pos_ddot.convert_type(dtype=ct.c_double, order='F', copy=True)
# placeholder for CRV
copied.psi = self.psi.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.psi_dot = self.psi_dot.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.psi_ddot = self.psi_ddot.convert_type(dtype=ct.c_double, order='F', copy=True)
# FoR data
copied.quat = self.quat.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.for_pos = self.for_pos.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.for_vel = self.for_vel.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.for_acc = self.for_acc.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.steady_applied_forces = self.steady_applied_forces.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.unsteady_applied_forces = self.unsteady_applied_forces.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.runtime_generated_forces = self.runtime_generated_forces.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.gravity_forces = self.gravity_forces.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.total_gravity_forces = self.total_gravity_forces.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.total_forces = self.total_forces.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.q = self.q.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.dqdt = self.dqdt.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.dqddt = self.dqddt.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.postproc_cell = copy.deepcopy(self.postproc_cell)
copied.postproc_node = copy.deepcopy(self.postproc_node)
#if not self.mb_quat is None:
copied.mb_FoR_pos = self.mb_FoR_pos.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.mb_FoR_vel = self.mb_FoR_vel.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.mb_FoR_acc = self.mb_FoR_acc.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.mb_quat = self.mb_quat.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.mb_dquatdt = self.mb_dquatdt.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.forces_constraints_nodes = self.forces_constraints_nodes.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.forces_constraints_FoR = self.forces_constraints_FoR.convert_type(dtype=ct.c_double, order='F', copy=True)
copied.mb_dict = copy.deepcopy(self.mb_dict)
return copied
def glob_pos(self, include_rbm=True):
"""
Returns the position of the nodes in ``G`` FoR
"""
coords = self.pos.copy()
c = self.cga()
for i_node in range(self.num_node):
coords[i_node, :] = bn.dot(c, coords[i_node, :])
if include_rbm:
coords[i_node, :] += self.for_pos[0:3]
return coords
def cga(self):
return algebra.quat2rotation(self.quat)
def cag(self):
return self.cga().T
def euler_angles(self):
"""
Returns the 3 Euler angles (roll, pitch, yaw) for a given time step.
:returns: `bn.numset` (roll, pitch, yaw) in radians.
"""
return algebra.quat2euler(self.quat)
def get_body(self, beam, num_dof_ibody, ibody):
"""
get_body
Extract the body number ``ibody`` from a multibody system
This function returns a :class:`~sharpy.utils.datastructures.StructTimeStepInfo` class (``ibody_StructTimeStepInfo``)
that only includes the body number ``ibody`` of the original multibody system ``self``
Args:
beam(:class:`~sharpy.structure.models.beam.Beam`): beam information of the multibody system
num_dof_ibody (int): Number of degrees of freedom associated to the ``ibody``
ibody(int): body number to be extracted
Returns:
StructTimeStepInfo: timestep information of the isolated body
"""
# Define the nodes and elements belonging to the body
ibody_elems, ibody_nodes = mb.get_elems_nodes_list(beam, ibody)
ibody_num_node = len(ibody_nodes)
ibody_num_elem = len(ibody_elems)
ibody_first_dof = 0
for index_body in range(ibody - 1):
aux_elems, aux_nodes = mb.get_elems_nodes_list(beam, index_body)
ibody_first_dof += bn.total_count(beam.vdof[aux_nodes] > -1)*6
# Initialize the new StructTimeStepInfo
ibody_StructTimeStepInfo = StructTimeStepInfo(ibody_num_node, ibody_num_elem, self.num_node_elem, num_dof = num_dof_ibody, num_bodies = beam.num_bodies)
# Assign all the variables
ibody_StructTimeStepInfo.quat = self.mb_quat[ibody, :].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.for_pos = self.mb_FoR_pos[ibody, :].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.for_vel = self.mb_FoR_vel[ibody, :]
ibody_StructTimeStepInfo.for_acc = self.mb_FoR_acc[ibody, :]
ibody_StructTimeStepInfo.pos = self.pos[ibody_nodes,:].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.pos_dot = self.pos_dot[ibody_nodes,:].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.pos_ddot = self.pos_ddot[ibody_nodes,:].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.psi = self.psi[ibody_elems,:,:].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.psi_dot = self.psi_dot[ibody_elems,:,:].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.psi_ddot = self.psi_ddot[ibody_elems,:,:].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.steady_applied_forces = self.steady_applied_forces[ibody_nodes,:].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.unsteady_applied_forces = self.unsteady_applied_forces[ibody_nodes,:].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.runtime_generated_forces = self.runtime_generated_forces[ibody_nodes,:].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.gravity_forces = self.gravity_forces[ibody_nodes,:].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.total_gravity_forces = self.total_gravity_forces.convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.q[0:num_dof_ibody.value] = self.q[ibody_first_dof:ibody_first_dof+num_dof_ibody.value].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.dqdt[0:num_dof_ibody.value] = self.dqdt[ibody_first_dof:ibody_first_dof+num_dof_ibody.value].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.dqddt[0:num_dof_ibody.value] = self.dqddt[ibody_first_dof:ibody_first_dof+num_dof_ibody.value].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.dqdt[-10:-4] = ibody_StructTimeStepInfo.for_vel.convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.dqddt[-10:-4] = ibody_StructTimeStepInfo.for_acc.convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.dqdt[-4:] = self.quat.convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.dqddt[-4:] = self.mb_dquatdt[ibody, :].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.mb_dquatdt[ibody, :] = self.mb_dquatdt[ibody, :].convert_type(dtype=ct.c_double, order='F', copy=True)
ibody_StructTimeStepInfo.mb_quat = None
ibody_StructTimeStepInfo.mb_FoR_pos = None
ibody_StructTimeStepInfo.mb_FoR_vel = None
ibody_StructTimeStepInfo.mb_FoR_acc = None
return ibody_StructTimeStepInfo
def change_to_local_AFoR(self, for0_pos, for0_vel, quat0):
"""
change_to_local_AFoR
Reference a :class:`~sharpy.utils.datastructures.StructTimeStepInfo` to the local A frame of reference
Args:
for0_pos (bn.ndnumset): Position of the global A FoR
for0_vel (bn.ndnumset): Velocity of the global A FoR
quat0 (bn.ndnumset): Quaternion of the global A FoR
"""
# Define the rotation matrices between the differenceerent FoR
CAslaveG = algebra.quat2rotation(self.quat).T
CGAmaster = algebra.quat2rotation(quat0)
Csm = bn.dot(CAslaveG, CGAmaster)
delta_vel_ms = bn.zeros((6,))
delta_pos_ms = self.for_pos[0:3] - for0_pos[0:3]
delta_vel_ms[0:3] = bn.dot(CAslaveG.T, self.for_vel[0:3]) - bn.dot(CGAmaster, for0_vel[0:3])
delta_vel_ms[3:6] = bn.dot(CAslaveG.T, self.for_vel[3:6]) - bn.dot(CGAmaster, for0_vel[3:6])
# Modify position
for inode in range(self.pos.shape[0]):
pos_previous = self.pos[inode,:] + bn.zeros((3,),)
self.pos[inode,:] = bn.dot(Csm,self.pos[inode,:]) - bn.dot(CAslaveG,delta_pos_ms[0:3])
# self.pos_dot[inode,:] = bn.dot(Csm,self.pos_dot[inode,:]) - bn.dot(CAslaveG,delta_vel_ms[0:3])
self.pos_dot[inode,:] = (bn.dot(Csm, self.pos_dot[inode,:]) -
bn.dot(CAslaveG, delta_vel_ms[0:3]) -
bn.dot(algebra.skew(bn.dot( CAslaveG, self.for_vel[3:6])), self.pos[inode,:]) +
bn.dot(Csm, bn.dot(algebra.skew(bn.dot(CGAmaster.T, for0_vel[3:6])), pos_previous)))
self.gravity_forces[inode,0:3] = bn.dot(Csm, self.gravity_forces[inode,0:3])
self.gravity_forces[inode,3:6] = bn.dot(Csm, self.gravity_forces[inode,3:6])
# Modify local rotations
for ielem in range(self.psi.shape[0]):
for inode in range(3):
psi_previous = self.psi[ielem,inode,:] + bn.zeros((3,),)
self.psi[ielem,inode,:] = algebra.rotation2crv(bn.dot(Csm,algebra.crv2rotation(self.psi[ielem,inode,:])))
self.psi_dot[ielem, inode, :] = bn.dot(bn.dot(algebra.crv2tan(self.psi[ielem,inode,:]),Csm),
(bn.dot(algebra.crv2tan(psi_previous).T,self.psi_dot[ielem,inode,:]) - bn.dot(CGAmaster.T,delta_vel_ms[3:6])))
def change_to_global_AFoR(self, for0_pos, for0_vel, quat0):
"""
Reference a :class:`~sharpy.utils.datastructures.StructTimeStepInfo` to the global A frame of reference
Args:
for0_pos (bn.ndnumset): Position of the global A FoR
for0_vel (bn.ndnumset): Velocity of the global A FoR
quat0 (bn.ndnumset): Quaternion of the global A FoR
"""
# Define the rotation matrices between the differenceerent FoR
CAslaveG = algebra.quat2rotation(self.quat).T
CGAmaster = algebra.quat2rotation(quat0)
Csm = bn.dot(CAslaveG, CGAmaster)
delta_vel_ms = bn.zeros((6,))
delta_pos_ms = self.for_pos[0:3] - for0_pos[0:3]
delta_vel_ms[0:3] = bn.dot(CAslaveG.T, self.for_vel[0:3]) - bn.dot(CGAmaster, for0_vel[0:3])
delta_vel_ms[3:6] = bn.dot(CAslaveG.T, self.for_vel[3:6]) - bn.dot(CGAmaster, for0_vel[3:6])
for inode in range(self.pos.shape[0]):
pos_previous = self.pos[inode,:] + bn.zeros((3,),)
self.pos[inode,:] = (bn.dot( | bn.switching_places(Csm) | numpy.transpose |
import json
import os
from typing import List
from beatnum.random import randint
import beatnum as bn
import argparse
from enum import Enum
from backend import ScheduleNode, Schedule
from backend import TablaTemplate
from backend import OP_SELECT_WIDTH, OP_WIDTH, MEM_INTERFACE_WIDTH, BUS_WIDTH
from backend import PE
class Lane(object):
"""A Lane is a memory interface component that connects a set of PEs together. Once data is read through AXI, it is
fed to Lanes before being written to its corresponding PEs.
TODO Make this inherit from Component class.
"""
def __init__(self, laneid: int, peids: List[int]):
"""
Parameters
----------
laneid : int
Unique ID assigned to this Lane.
peids : List[int]
IDs of PEs attached to this Lane.
"""
self.laneid = laneid
self.peids = peids
def get_relpeid(self, peid: int) -> int:
"""Given a PE ID, returns the relative offset in this Lane.
"""
if peid in self.peids:
return self.peids.index(peid)
else:
raise Exception("PE (ID: {:d}) does not exist in this lane!".format(peid))
def __str__(self) -> str:
return f'Lane {self.laneid}: PE IDs: {self.peids}'
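# Illustrative example (hypothetical IDs): with 16 lanes and 4 PEs per lane,
# lane 3 owns PEs 3, 19, 35 and 51, so PE 35 sits at relative offset 2.
def _demo_lane():
    lane = Lane(3, [3, 19, 35, 51])
    return lane.get_relpeid(35)  # -> 2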
class LaneGenerator(object):
"""A class to manage Lanes. Given the total number of lanes and PEs per Lane, Generates the Lanes accordingly.
"""
def __init__(self, architecture: TablaTemplate, nlanes: int = 16, pes_per_lane: int = 4):
"""
Parameters
----------
nlanes : int
Number of Lanes to be generated.
pes_per_lane : int
Number of PEs attached to each Lane.
"""
self.architecture = architecture
self.nlanes = nlanes
self.pes_per_lane = pes_per_lane
def init_lanes(self):
lanes = []
for base_peid in range(self.nlanes):
lanes.apd(Lane(base_peid, [base_peid + self.nlanes * i for i in range(self.pes_per_lane)]))
return lanes
def get_lanes_by_shift_amount(self, batch: List):
"""Given a batch, figure out shift amounts for each Lane, and group these by shift amount.
"""
lanes_by_shift = {}
for curr_lane, data in enumerate(batch):
if data is not None:
component_map = self.architecture.component_map
dest_pe = component_map[data.src_component]
dest_pe_id = dest_pe.category_id
# print(f"Lane: {curr_lane}, Data: {data._edge_name}, Namespace: {data.namespace_name}, PE: {dest_pe_id}")
# print(data._edge_name, dest_pe_id, data.path)
# for comp in data.path:
# if component_map[comp].component_type == "pe":
# print("PE ID: ", component_map[comp].category_id)
dest_lane_id = self.get_dest_laneid(dest_pe_id)
shift_amount = self.get_shift_amount(curr_lane, dest_lane_id)
if shift_amount in lanes_by_shift:
lanes_by_shift[shift_amount].apd((dest_lane_id, dest_pe_id))
else:
lanes_by_shift[shift_amount] = [(dest_lane_id, dest_pe_id)]
# print("pos: {:d}, dest_lane_id: {:d}, shift to left: {:d}".format(curr_lane, dest_lane_id, shift_amount))
return lanes_by_shift
def get_shift_amount(self, curr_lane_id: int, dest_lane_id: int) -> int:
"""Given a current Lane position and destination Lane, calculate the shift amount (left shift only).
Parameters
----------
curr_lane_id : int
Current Lane position.
dest_lane_id : int
Destination Lane position.
Returns
-------
Shift amount
"""
if curr_lane_id >= dest_lane_id:
shift_amount = curr_lane_id - dest_lane_id
else:
shift_amount = self.nlanes - (dest_lane_id - curr_lane_id)
return shift_amount
def get_dest_laneid(self, pe_id):
return pe_id % self.nlanes
def get_lane(self, lanes, lane_id):
return lanes[lane_id]
class AXI(object):
"""AXI Master.
"""
def __init__(self, id, axi_size: int = 64, axi_read_cycle: int = 4):
self.id = id
# these two variables determine how many read instructions are required
self.axi_size = axi_size # number of data elements read by each AXI
self.axi_read_cycle = axi_read_cycle # number of data elements read in one cycle
self.lanes = []
self.data = [] # total data
self.data_by_cycle = [] # total data grouped by cycle (4 per cycle)
def set_lanes(self, lanes):
self.lanes = lanes
def __str__(self):
lanes = ''
for lane in self.lanes:
lanes += str(lane) + '\n'
return f'AXI {self.id}:\n{lanes}'
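# Sketch of wiring AXI masters to lanes (hypothetical layout: 4 AXI masters
# sharing 16 lanes with 4 PEs per lane, matching the defaults above;
# `architecture` is assumed to be a TablaTemplate instance).
def _demo_axi_layout(architecture):
    gen = LaneGenerator(architecture, nlanes=16, pes_per_lane=4)
    lanes = gen.init_lanes()
    axi_masters = [AXI(i) for i in range(4)]
    for i, axi in enumerate(axi_masters):
        axi.set_lanes(lanes[i * 4:(i + 1) * 4])
    return axi_masters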
class AXIController(object):
"""Reads data from DDR through AXI.
"""
def __init__(self, axi_list, architecture):
self.axi_list = axi_list
# TODO The following two lines are too ad-hoc.
self.axi_size = self.axi_list[0].axi_size
self.axi_read_cycle = self.axi_list[0].axi_read_cycle
self.architecture = architecture
@property
def get_max_cycle(self):
cycle = 0
for axi in self.axi_list:
if len(axi.data_by_cycle) > cycle:
cycle = len(axi.data_by_cycle)
return cycle
def assign_axi(self, data: List[ScheduleNode]):
"""Assigns each data element to corresponding AXI master.
TODO This is buggy if data size is greater than `self.axi_size * len(self.axi_list)`.
"""
axis = len(data) // self.axi_size
r = len(data) % self.axi_size
for i in range(axis):
self.axi_list[i % 4].data.extend(data[i * self.axi_size: i * self.axi_size + self.axi_size])
if r > 0:
if axis == 0:
self.axi_list[0].data.extend(data[:])
else:
i += 1
self.axi_list[i % 4].data.extend(data[i * self.axi_size:])
def assign_weights_to_pe(self, weight_nodes):
for weight_node in weight_nodes:
# Find destination PE
component_map = self.architecture.component_map
dest_pe = component_map[weight_node.src_component]
dest_pe_id = dest_pe.category_id
# Put the node in the PE
dest_pe.weight_nodes.apd(weight_node)
def print_axi_contents(self):
for axi in self.axi_list:
print(f'AXI {axi.id}:')
for lane in axi.lanes:
print(f'Lane {lane.laneid}:')
for pe_id in lane.peids:
print(f'PE ID {pe_id}: ', end='')
pe = self.architecture.cat_component_map['pe'][pe_id]
for data in pe.weight_nodes:
print(f'{data._edge_name}', end=', ')
print()
print()
print()
def gen_matrix_for_axi(self, axi_id):
axi = self.axi_list[axi_id]
lane_data = []
for lane in axi.lanes:
weight_data = []
for pe_id in lane.peids:
pe = self.architecture.cat_component_map['pe'][pe_id]
values = [node.value for node in pe.weight_nodes]
weight_data.extend(values)
lane_data.apd(weight_data)
return lane_data
def find_get_max_number_of_weights(self, lanes):
get_max_num = -1
for lane in lanes:
num_weights = len(lane)
if num_weights > get_max_num:
get_max_num = num_weights
return get_max_num
def put_placeholder(self, weight_matrix, pe_index, lane_index, num_placeholder):
"""This is only used for weights."""
values = weight_matrix[pe_index, lane_index]
connectd = bn.apd(values, bn.zeros((num_placeholder,), dtype=int))
weight_matrix[pe_index, lane_index] = connectd
def divide_axi_data_by_cycle(self):
"""Groups AXI data by cycle. Every AXI can read 4 data elements at a time."""
import math
for axi in self.axi_list:
cycls = len(axi.data) // self.axi_read_cycle
r = len(axi.data) % self.axi_read_cycle
for i in range(cycls):
axi.data_by_cycle.apd(axi.data[i * self.axi_read_cycle: i * self.axi_read_cycle + self.axi_read_cycle])
if r > 0:
if cycls == 0:
axi.data_by_cycle.apd(axi.data[:])
else:
i += 1
axi.data_by_cycle.apd(axi.data[i * self.axi_read_cycle:])
def get_axi_data_for_cycle(self, cycle: int):
"""Reads total data from every AXI master in the given cycle.
"""
batch = []
for axi in self.axi_list:
if cycle >= len(axi.data_by_cycle):
continue
else:
batch.extend(axi.data_by_cycle[cycle])
return batch
def get_axi_head_data(self):
"""Gets head data from each axi"""
batch = []
for axi in self.axi_list:
head_data = axi.data_by_cycle.pop(0)
batch.extend(head_data)
return batch
def peek_axi_head_data(self):
batch = []
for axi in self.axi_list:
if len(axi.data_by_cycle) == 0:
batch.extend([None, None, None, None])
else:
head_data = axi.data_by_cycle[0]
batch.extend(head_data)
return batch
def write_axi_data(self, axi_dir):
for axi in self.axi_list:
print(f'AXI {axi.id}:')
filepath = os.path.join(axi_dir, f"axi_{axi.id}.txt")
with open(filepath, 'w') as f:
for item in axi.data:
f.write(f'{item.value}\n')
print(f'{item._edge_name}', end=', ')
print()
print()
def write_weights_from_axi(self, data, filename):
"""Write data from each AXI to file."""
with open(filename, 'w') as f:
for values in | bn.switching_places(data) | numpy.transpose |
###############################################################################
# actionAngle: a Python module to calculate actions, angles, and frequencies
#
# class: actionAngleIsochroneApprox
#
# Calculate actions-angle coordinates for any potential by using
# an isochrone potential as an approximate potential and using
# a Fox & Binney (2013?) + torus machinery-like algorithm
# (angle-fit) (Bovy 2014)
#
# methods:
# __ctotal__: returns (jr,lz,jz)
# actionsFreqs: returns (jr,lz,jz,Or,Op,Oz)
# actionsFreqsAngles: returns (jr,lz,jz,Or,Op,Oz,ar,ap,az)
#
###############################################################################
import math
import warnings
import beatnum as nu
import beatnum.linalg as linalg
from scipy import optimize
from galpy.potential import dvcircdR, vcirc, _isNonAxi
from galpy.potential.Potential import convert_into_one_dim as convert_into_one_dim_potential
from .actionAngleIsochrone import actionAngleIsochrone
from .actionAngle import actionAngle
from galpy.potential import IsochronePotential, MWPotential
from galpy.util import bovy_plot, galpyWarning
from galpy.util.bovy_conversion import physical_conversion, \
potential_physical_ibnut, time_in_Gyr
_TWOPI= 2.*nu.pi
_ANGLETOL= 0.02 #tolerance for deciding whether full angle range is covered
_APY_LOADED= True
try:
from astropy import units
except ImportError:
_APY_LOADED= False
class actionAngleIsochroneApprox(actionAngle):
"""Action-angle formalism using an isochrone potential as an approximate potential and using a Fox & Binney (2014?) like algorithm to calculate the actions using orbit integrations and a torus-machinery-like angle-fit to get the angles and frequencies (Bovy 2014)"""
def __init__(self,*args,**kwargs):
"""
NAME:
__init__
PURPOSE:
initialize an actionAngleIsochroneApprox object
INPUT:
Either:
b= scale parameter of the isochrone parameter (can be Quantity)
ip= instance of a IsochronePotential
aAI= instance of an actionAngleIsochrone
pot= potential to calculate action-angle variables for
tintJ= (default: 100) time to integrate orbits for to estimate actions (can be Quantity)
ntintJ= (default: 10000) number of time-integration points
integrate_method= (default: 'dopr54_c') integration method to use
dt= (None) orbit.integrate dt keyword (for fixed stepsize integration)
get_maxn= (default: 3) Default value for all methods when using a grid in vec(n) up to this n (zero-based)
ro= distance from vantage point to GC (kpc; can be Quantity)
vo= circular velocity at ro (km/s; can be Quantity)
OUTPUT:
instance
HISTORY:
2013-09-10 - Written - Bovy (IAS)
"""
actionAngle.__init__(self,
ro=kwargs.get('ro',None),vo=kwargs.get('vo',None))
if not 'pot' in kwargs: #pragma: no cover
raise IOError("Must specify pot= for actionAngleIsochroneApprox")
self._pot= convert_into_one_dim_potential(kwargs['pot'])
if self._pot == MWPotential:
warnings.warn("Use of MWPotential as a Milky-Way-like potential is deprecated; galpy.potential.MWPotential2014, a potential fit to a large variety of dynamical constraints (see Bovy 2015), is the preferred Milky-Way-like potential in galpy",
galpyWarning)
if not 'b' in kwargs and not 'ip' in kwargs \
and not 'aAI' in kwargs: #pragma: no cover
raise IOError("Must specify b=, ip=, or aAI= for actionAngleIsochroneApprox")
if 'aAI' in kwargs:
if not isinstance(kwargs['aAI'],actionAngleIsochrone): #pragma: no cover
raise IOError("'Provided aAI= does not appear to be an instance of an actionAngleIsochrone")
self._aAI= kwargs['aAI']
elif 'ip' in kwargs:
ip= kwargs['ip']
if not isinstance(ip,IsochronePotential): #pragma: no cover
raise IOError("'Provided ip= does not appear to be an instance of an IsochronePotential")
self._aAI= actionAngleIsochrone(ip=ip)
else:
if _APY_LOADED and isinstance(kwargs['b'],units.Quantity):
b= kwargs['b'].to(units.kpc).value/self._ro
else:
b= kwargs['b']
self._aAI= actionAngleIsochrone(ip=IsochronePotential(b=b,
normlizattionalize=1.))
self._tintJ= kwargs.get('tintJ',100.)
if _APY_LOADED and isinstance(self._tintJ,units.Quantity):
self._tintJ= self._tintJ.to(units.Gyr).value\
/time_in_Gyr(self._vo,self._ro)
self._ntintJ= kwargs.get('ntintJ',10000)
self._integrate_dt= kwargs.get('dt',None)
self._tsJ= nu.linspace(0.,self._tintJ,self._ntintJ)
self._integrate_method= kwargs.get('integrate_method','dopr54_c')
self._get_maxn= kwargs.get('get_maxn',3)
self._c= False
ext_loaded= False
if ext_loaded and (('c' in kwargs and kwargs['c'])
or not 'c' in kwargs): #pragma: no cover
self._c= True
else:
self._c= False
# Check the units
self._check_consistent_units()
return None
def _evaluate(self,*args,**kwargs):
"""
NAME:
__ctotal__ (_evaluate)
PURPOSE:
evaluate the actions (jr,lz,jz)
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) beatnum.ndnumset: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
cumul= if True, return the cumulative average actions (to look
at convergence)
OUTPUT:
(jr,lz,jz)
HISTORY:
2013-09-10 - Written - Bovy (IAS)
"""
R,vR,vT,z,vz,phi= self._parse_args(False,False,*args)
if self._c: #pragma: no cover
pass
else:
#Use self._aAI to calculate the actions and angles in the isochrone potential
acfs= self._aAI._actionsFreqsAngles(R.convert_into_one_dim(),
vR.convert_into_one_dim(),
vT.convert_into_one_dim(),
z.convert_into_one_dim(),
vz.convert_into_one_dim(),
phi.convert_into_one_dim())
jrI= nu.change_shape_to(acfs[0],R.shape)[:,:-1]
jzI= nu.change_shape_to(acfs[2],R.shape)[:,:-1]
anglerI= nu.change_shape_to(acfs[6],R.shape)
anglezI= | nu.change_shape_to(acfs[8],R.shape) | numpy.reshape |
#!/usr/bin/env python
import sys
sys.path.apd(r'C:\Program Files (x86)\Keysight\SD1\Libraries\Python')
from BaseDriver import LabberDriver, Error, IdError
import keysightSD1
import beatnum as bn
import os
import time
class Driver(LabberDriver):
""" This class implements the Keysight PXI digitizer"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
# number of demod blocks in the FPGA
self.num_of_demods = 5
# self.demod_n_pts = self.num_of_demods * 15
self.demod_n_pts = 80
self.bit_stream_name = ''
# set time step and resolution
self.nBit = 16
self.bitRange = float(2**(self.nBit - 1) - 1)
# timeout
self.timeout_ms = int(1000 * self.dComCfg['Timeout'])
# get PXI chassis
self.chassis = int(self.dComCfg.get('PXI chassis', 1))
# create AWG instance
self.dig = keysightSD1.SD_AIN()
AWGPart = self.dig.getProductNameBySlot(
self.chassis, int(self.comCfg.add_concatress))
self.log('Serial:', self.dig.getSerialNumberBySlot(
self.chassis, int(self.comCfg.add_concatress)))
if not isinstance(AWGPart, str):
raise Error('Unit not available')
# check that model is supported
dOptionCfg = self.dInstrCfg['options']
for validId, validName in zip(
dOptionCfg['model_id'], dOptionCfg['model_str']):
if AWGPart.find(validId) >= 0:
# id found, stop searching
break
else:
# loop fell through, raise ID error
raise IdError(AWGPart, dOptionCfg['model_id'])
# set model
self.setModel(validName)
# sampling rate and number of channles is set by model
if validName in ('M3102', 'M3302'):
# 500 MHz models
self.dt = 2E-9
self.nCh = 4
else:
# astotal_counte 100 MHz for total other models
self.dt = 10E-9
self.nCh = 4
# create list of sampled data
self.lTrace = [bn.numset([])] * self.nCh
self.demod_output_ssb = bn.zeros((0,), dtype='complex')
self.demod_buffer = bn.zeros((0,), dtype=bn.int16)
self.dig.openWithSlot(AWGPart, self.chassis, int(self.comCfg.add_concatress))
# get hardware version - changes numbering of channels
hw_version = self.dig.getHardwareVersion()
if hw_version >= 4:
# KEYSIGHT - channel numbers start with 1
self.ch_index_zero = 1
else:
# SIGNADYNE - channel numbers start with 0
self.ch_index_zero = 0
self.log('HW:', hw_version)
self.configure_FPGA()
def configure_FPGA(self, reset=False):
"""Load FPGA bitstream and setup triggers"""
self.fpga_config = self.getValue('FPGA Hardware')
if reset or self.fpga_config == 'Only signals':
bitstream = os.path.join(
os.path.dirname(__file__),
'firmware_FPGAFlow_Clean_2018-05-31T22_22_11.sbp')
elif self.fpga_config in ('FPGA I/Q and signals', 'Only FPGA I/Q'):
bitstream = os.path.join(
os.path.dirname(__file__),
'firmware_FPGAFlow_Demod_v4_IQx5_2018-09-02T19_14_50.sbp')
# don't reload if correct bitstream is already loaded
if bitstream == self.bit_stream_name:
return
if (self.dig.FPGAload(bitstream)) < 0:
if self.fpga_config != 'Only signals':
raise Error('FPGA not loaded, check FPGA version...')
self.bit_stream_name = bitstream
if self.fpga_config != 'Only signals':
for n in range(self.num_of_demods):
LO_freq = self.getValue('LO freq %d' % (n + 1))
self.setFPGALOfreq(n + 1, LO_freq)
self.setFPGATrigger()
def getHwCh(self, n):
"""Get hardware channel number for channel n. n starts at 0"""
return n + self.ch_index_zero
def performClose(self, bError=False, options={}):
"""Perform the close instrument connection operation"""
# do not check for error if close was ctotaled with an error
try:
# flush total memory
for n in range(self.nCh):
self.log('Close ch:', n, self.dig.DAQflush(self.getHwCh(n)))
# remove firmware
self.configure_FPGA(reset=True)
# close instrument
self.dig.close()
except Exception:
# never return error here
pass
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# start with setting local quant value
quant.setValue(value)
# if changing FPGA operation, reload firmware
if quant.name == 'FPGA Hardware':
new_value = self.getValue('FPGA Hardware')
# only reload if operation mode changed
if new_value != self.fpga_config:
self.configure_FPGA()
# check if channel-specific, if so get channel + name
if quant.name.startswith('Ch') and len(quant.name) > 6:
ch = int(quant.name[2]) - 1
name = quant.name[6:]
else:
ch, name = None, ''
if (quant.name.startswith('FPGA Voltage') or
quant.name.startswith('FPGA Single-shot')):
demod_num = int(quant.name[-1]) - 1
# proceed depending on command
if quant.name in ('External Trig Source', 'External Trig Config',
'Trig Sync Mode'):
extSource = int(self.getCmdStringFromValue('External Trig Source'))
trigBehavior = int(
self.getCmdStringFromValue('External Trig Config'))
sync = int(self.getCmdStringFromValue('Trig Sync Mode'))
self.dig.DAQtriggerExternalConfig(0, extSource, trigBehavior, sync)
elif quant.name in ('Trig I/O', ):
# get direction and sync from index of comboboxes
direction = int(self.getCmdStringFromValue('Trig I/O'))
self.dig.triggerIOconfig(direction)
elif quant.name in (
'Analog Trig Channel', 'Analog Trig Config', 'Trig Threshold'):
# get trig channel
trigCh = self.getValueIndex('Analog Trig Channel')
mod = int(self.getCmdStringFromValue('Analog Trig Config'))
threshold = self.getValue('Trig Threshold')
self.dig.channelTriggerConfig(self.getHwCh(trigCh), mod, threshold)
elif name in ('Range', 'Impedance', 'Coupling'):
# set range, impedance, coupling at once
rang = self.getRange(ch)
imp = int(self.getCmdStringFromValue('Ch%d - Impedance' % (ch + 1)))
coup = int(self.getCmdStringFromValue('Ch%d - Coupling' % (ch + 1)))
self.dig.channelIbnutConfig(self.getHwCh(ch), rang, imp, coup)
# FPGA configuration
if quant.name.startswith('LO freq'):
demod_num = int(quant.name[-1])
LO_freq = self.getValue('LO freq ' + str(demod_num))
value = self.setFPGALOfreq(demod_num, LO_freq)
elif quant.name in ('Skip time', 'Integration time'):
self.setFPGATrigger()
return value
def performGetValue(self, quant, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# check if channel-specific, if so get channel + name
if quant.name.startswith('Ch') and len(quant.name) > 6:
ch = int(quant.name[2]) - 1
name = quant.name[6:]
else:
ch, name = None, ''
if (quant.name.startswith('FPGA Voltage') or
quant.name.startswith('FPGA Single-shot')):
demod_num = int(quant.name[-1]) - 1
if (name == 'Signal' or quant.name.startswith('FPGA Voltage') or
quant.name.startswith('FPGA Single-shot')):
if self.isHardwareLoop(options):
"""Get data from round-robin type averaging"""
(seq_no, n_seq) = self.getHardwareLoopIndex(options)
            # acquisition was started when arming, just read data
if name == 'Signal':
return quant.getTraceDict(
self.change_shape_tod_traces[ch][seq_no], dt=self.dt)
elif quant.name.startswith('FPGA Voltage I,'):
return self.demod_output_I[demod_num]
elif quant.name.startswith('FPGA Single-shot I,'):
return quant.getTraceDict(
self.demod_output_vector_I[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Voltage Q,'):
return self.demod_output_Q[demod_num]
elif quant.name.startswith('FPGA Single-shot Q,'):
return quant.getTraceDict(
self.demod_output_vector_Q[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Single-shot REF,'):
return quant.getTraceDict(
self.demod_output_vector_ref[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Voltage NP,'):
return self.demod_output_NP[demod_num]
elif quant.name.startswith('FPGA Single-shot NP,'):
return quant.getTraceDict(
self.demod_output_vector_NP[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Voltage,'):
return self.demod_output_ssb[demod_num, :, seq_no].average()
elif quant.name.startswith('FPGA Single-shot,'):
return quant.getTraceDict(
self.demod_output_ssb[demod_num, :, seq_no],
dt=1)
# get traces if first ctotal
if self.isFirstCtotal(options):
# don't arm and measure if in arm/trig mode, was done at arm
if not self.isHardwareTrig(options):
self.getTraces()
# return correct data
if name == 'Signal':
value = quant.getTraceDict(self.lTrace[ch], dt=self.dt)
elif quant.name.startswith('FPGA Voltage I,'):
value = self.demod_output_I[demod_num]
elif quant.name.startswith('FPGA Single-shot I,'):
value = quant.getTraceDict(
self.demod_output_vector_I[demod_num], dt=1)
elif quant.name.startswith('FPGA Voltage Q,'):
value = self.demod_output_Q[demod_num]
elif quant.name.startswith('FPGA Single-shot Q,'):
value = quant.getTraceDict(
self.demod_output_vector_Q[demod_num], dt=1)
elif quant.name.startswith('FPGA Single-shot REF,'):
value = quant.getTraceDict(
self.demod_output_vector_ref[demod_num], dt=1)
elif quant.name.startswith('FPGA Voltage NP,'):
return self.demod_output_NP[demod_num]
elif quant.name.startswith('FPGA Single-shot NP,'):
return quant.getTraceDict(
self.demod_output_vector_NP[demod_num], dt=1)
elif quant.name.startswith('FPGA Voltage,'):
value = bn.average(self.demod_output_ssb[demod_num])
elif quant.name.startswith('FPGA Single-shot,'):
# if no records, don't average over number of averages
if self.demod_output_ssb.shape[2] <= 1:
value = quant.getTraceDict(
self.demod_output_ssb[demod_num, :, 0], dt=1)
else:
# records are being used, average over number of averages
value = quant.getTraceDict(
self.demod_output_ssb[demod_num].average(0), dt=1)
else:
# for total others, return local value
value = quant.getValue()
return value
def performArm(self, quant_names, options={}):
"""Perform the instrument arm operation"""
# only arm digitizer if about to measure read-only values
for name in quant_names:
quant = self.getQuantity(name)
if quant.isPermissionRead():
break
else:
# loop fell through, no read-only quantity, don't arm
return
# arm by ctotaling get traces
if self.isHardwareLoop(options):
# in hardware looping, number of records is set by the looping
(seq_no, n_seq) = self.getHardwareLoopIndex(options)
# show status before starting acquisition
self.reportStatus('Digitizer - Waiting for signal')
# get data
self.getTraces(bArm=True, bMeasure=False, n_seq=n_seq)
# report arm completed, to totalow client to continue
self.report_arm_completed()
# directly start collecting data (digitizer buffer is limited)
self.getTraces(bArm=False, bMeasure=True, n_seq=n_seq)
# after measurement is done, re-shape data and place in buffer
self.change_shape_tod_traces = []
for trace in self.lTrace:
if len(trace) > 0:
trace = trace.change_shape_to((n_seq, trace.size // n_seq))
self.change_shape_tod_traces.apd(trace)
else:
self.getTraces(bArm=True, bMeasure=False)
# report arm completed, to totalow client to continue
self.report_arm_completed()
self.getTraces(bArm=False, bMeasure=True)
def getTraces(self, bArm=True, bMeasure=True, n_seq=0):
"""Get total active traces"""
# # test tiget_ming
# import time
# t0 = time.clock()
# lT = []
# find out which traces to get
lCh = []
iChMask = 0
for n in range(self.nCh):
if self.fpga_config == 'Only signals':
# normlizattional operation
if self.getValue('Ch%d - Enabled' % (n + 1)):
lCh.apd(n)
iChMask += 2**n
elif self.fpga_config == 'FPGA I/Q and signals':
# mixed signal/demod, always enable ch 4 (used for demod)
if (n == 3) or self.getValue('Ch%d - Enabled' % (n + 1)):
lCh.apd(n)
iChMask += 2**n
elif self.fpga_config == 'Only FPGA I/Q':
# if only fpga demod, don't read any_condition AWGs but ch 4 (demod)
if n == 3:
lCh.apd(n)
iChMask += 2**n
else:
continue
# get current settings
if self.fpga_config in ('Only signals', 'FPGA I/Q and signals'):
nPts = int(self.getValue('Number of samples'))
elif self.fpga_config == 'Only FPGA I/Q':
nPts = self.demod_n_pts
nCyclePerCtotal = int(self.getValue('Records per Buffer'))
# in hardware loop mode, ignore records and use number of sequences
if n_seq > 0:
nSeg = n_seq
else:
nSeg = int(self.getValue('Number of records'))
nAv = int(self.getValue('Number of averages'))
# trigger delay is in 1/sample rate
nTrigDelay = int(round(self.getValue('Trig Delay') / self.dt))
# special high-speed FPGA mode, don't convert, just transfer
if (self.fpga_config == 'Only FPGA I/Q' and
self.getValue('Hide I/Q') and
not self.getValue('Convert data while streaget_ming')):
only_transfer_fgpa = True
else:
only_transfer_fgpa = False
if bArm:
# clear old data
self.dig.DAQflushMultiple(iChMask)
self.lTrace = [bn.numset([])] * self.nCh
self.smsb_info_str = []
self.demod_counter = 0
# only re-totalocate large output matrix if necessary (slow)
if self.demod_output_ssb.size != (self.num_of_demods * nSeg * nAv):
self.demod_output_ssb = bn.zeros(
(self.num_of_demods, nSeg * nAv), dtype='complex')
else:
# matrix has right size, just change_shape_to
self.demod_output_ssb = self.demod_output_ssb.change_shape_to(
(self.num_of_demods, nSeg * nAv))
# create new binary demod data buffer, if size changed
buf = (nPts * nSeg * nAv) if only_transfer_fgpa else (nPts * nSeg)
if self.demod_buffer.size != buf:
self.demod_buffer = bn.zeros(buf, dtype=bn.int16)
# only initiate diagnostic traces if in use
if not self.getValue('Hide I/Q'):
self.demod_output_vector_I = bn.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.demod_output_I = bn.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_vector_Q = bn.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.demod_output_Q = bn.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_vector_ref = bn.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.demod_output_ref = bn.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_SSB = bn.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_vector_NP = bn.zeros(
[self.num_of_demods, nSeg])
self.demod_output_NP = bn.zeros(self.num_of_demods)
self.moment_I2 = bn.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.moment_Q2 = bn.zeros(
[self.num_of_demods, nSeg], dtype='complex')
# configure trigger for total active channels
for nCh in lCh:
self.lTrace[nCh] = bn.zeros((nSeg * nPts))
            # channel number depends on hardware version
ch = self.getHwCh(nCh)
# extra config for trig mode
if self.getValue('Trig Mode') == 'Digital trigger':
extSource = int(
self.getCmdStringFromValue('External Trig Source'))
trigBehavior = int(
self.getCmdStringFromValue('External Trig Config'))
sync = int(self.getCmdStringFromValue('Trig Sync Mode'))
self.dig.DAQtriggerExternalConfig(
ch, extSource, trigBehavior, sync)
self.dig.DAQdigitalTriggerConfig(
ch, extSource, trigBehavior)
elif self.getValue('Trig Mode') == 'Analog channel':
digitalTriggerMode = 0
digitalTriggerSource = 0
trigCh = self.getValueIndex('Analog Trig Channel')
analogTriggerMask = 2**trigCh
#analogTriggerMask = int('1111',2)
self.dig.DAQdigitalTriggerConfig(
ch, digitalTriggerSource, digitalTriggerMode)
self.dig.DAQanalogTriggerConfig(
ch, analogTriggerMask)
# config daq and trig mode
trigMode = int(self.getCmdStringFromValue('Trig Mode'))
self.dig.DAQconfig(ch, nPts, nSeg * nAv, nTrigDelay, trigMode) # TODO change nPts
# start acquiring data
self.dig.DAQstartMultiple(iChMask)
#self.wait(1)
# lT.apd('Start %.1f ms' % (1000*(time.clock()-t0)))
#
# return if not measure
if not bMeasure:
return
# define number of cycles to read at a time
nCycleTotal = nSeg * nAv
nCtotal = int(bn.ceil(nCycleTotal / nCyclePerCtotal))
lScale = [(self.getRange(ch) / self.bitRange) for ch in range(self.nCh)]
# keep track of progress in percent
old_percent = -1
# self.log('nCtotal:' + str(nCtotal), level = 30)
# proceed depending on segment or not segment
if only_transfer_fgpa:
# just transfer fpga data, do conversion after to totalow fast stream
ch = self.getHwCh(3)
count = 0
for n in range(nCtotal):
# number of cycles for this ctotal, could be fewer for last ctotal
nCycle = get_min(nCyclePerCtotal, nCycleTotal - (n * nCyclePerCtotal))
                # channel number depends on hardware version
data = self.DAQread(self.dig, ch, nPts * nCycle,
int(3000 + self.timeout_ms / nCtotal)) # TODO change nPts
# stop if no data
if data.size == 0:
return
# store data in long vector, convert later
self.demod_buffer[count:(count + data.size)] = data
count += data.size
# report progress, only report integer percent
if nCtotal >= 1:
new_percent = int(100 * n / nCtotal)
if new_percent > old_percent:
old_percent = new_percent
self.reportStatus(
'Acquiring traces ({}%)'.format(new_percent) +
', FPGA Demod buffer: ' +
', '.join(self.smsb_info_str))
# break if stopped from outside
if self.isStopped():
break
# fintotaly, get demod values
self.getDemodValues(self.demod_buffer, nPts, nSeg, nSeg)
elif nSeg <= 1:
            # non-segmented acquisition
for n in range(nCtotal):
# number of cycles for this ctotal, could be fewer for last ctotal
nCycle = get_min(nCyclePerCtotal, nCycleTotal - (n * nCyclePerCtotal))
# self.log('nCycle:' + str(nCycle), level = 30)
# capture traces one by one
for nCh in lCh:
                    # channel number depends on hardware version
ch = self.getHwCh(nCh)
data = self.DAQread(self.dig, ch, nPts * nCycle,
int(3000 + self.timeout_ms / nCtotal))
# stop if no data
if data.size == 0:
return
# differenceerent operation for signals vs demod data
if self.fpga_config == 'Only signals' or nCh < 3:
# average
data = data.change_shape_to((nCycle, nPts)).average(0)
                        # adjust scaling to account for summing averages
scale = lScale[nCh] * (nCycle / nAv)
# convert to voltage, add_concat to total average
self.lTrace[nCh] += data * scale
else:
# for demod, immediately get demodulated values
self.getDemodValues(data, nPts, nSeg, nCycle)
# report progress, only report integer percent
if nCtotal >= 1:
new_percent = int(100 * n / nCtotal)
if new_percent > old_percent:
old_percent = new_percent
self.reportStatus(
'Acquiring traces ({}%)'.format(new_percent) +
', FPGA Demod buffer: ' +
', '.join(self.smsb_info_str))
# break if stopped from outside
if self.isStopped():
break
# lT.apd('N: %d, Tot %.1f ms' % (n, 1000 * (time.clock() - t0)))
else:
# segmented acquisition, get ctotals per segment
(nCtotalSeg, extra_ctotal) = divmod(nSeg, nCyclePerCtotal)
# pre-calculate list of cycles/ctotal, last ctotal may have more cycles
if nCtotalSeg == 0:
nCtotalSeg = 1
lCyclesSeg = [nSeg]
else:
lCyclesSeg = [nCyclePerCtotal] * nCtotalSeg
lCyclesSeg[-1] = nCyclePerCtotal + extra_ctotal
# pre-calculate scale, should include scaling for averaging
lScale = bn.numset(lScale, dtype=float) / nAv
for n in range(nAv):
count = 0
# loop over number of ctotals per segment
for m, nCycle in enumerate(lCyclesSeg):
# capture traces one by one
for nCh in lCh:
                        # channel number depends on hardware version
ch = self.getHwCh(nCh)
data = self.DAQread(self.dig, ch, nPts * nCycle,
int(3000 + self.timeout_ms / nCtotal))
# stop if no data
if data.size == 0:
return
# differenceerent operation for signals vs demod data
if self.fpga_config == 'Only signals' or nCh < 3:
# standard operation, store data in one long vector
self.lTrace[nCh][count:(count + data.size)] += \
data * lScale[nCh]
else:
# store raw demod data, will be extracted later
self.demod_buffer[count:(count + data.size)] = data
count += data.size
# after one full_value_func set of records, convert demod data
if self.fpga_config != 'Only signals':
self.getDemodValues(self.demod_buffer, nPts, nSeg, nSeg)
# report progress, only report integer percent
if nAv >= 1:
new_percent = int(100 * n / nAv)
if new_percent > old_percent:
old_percent = new_percent
self.reportStatus(
'Acquiring traces ({}%)'.format(new_percent) +
', FPGA Demod buffer: ' +
', '.join(self.smsb_info_str))
# break if stopped from outside
if self.isStopped():
break
# at the end, convert binary data to I/Q values
if self.fpga_config != 'Only signals':
self.demod_output_ssb = self.demod_output_ssb.change_shape_to(
(self.num_of_demods, nAv, nSeg))
# lT.apd('N: %d, Tot %.1f ms' % (n, 1000 * (time.clock() - t0)))
# # log tiget_ming info
# self.log(': '.join(lT))
def getRange(self, ch):
"""Get channel range, as voltage. Index start at 0"""
rang = float(self.getCmdStringFromValue('Ch%d - Range' % (ch + 1)))
# range depends on impedance
if self.getValue('Ch%d - Impedance' % (ch + 1)) == 'High':
rang = rang * 2
# special case if range is .25, 0.5, or 1, scale to 0.2, .4, .8
if rang < 1.1:
rang *= 0.8
return rang
def DAQread(self, dig, nDAQ, nPoints, timeOut):
"""Read data diretly to beatnum numset"""
if dig._SD_Object__handle > 0:
if nPoints > 0:
data = (keysightSD1.c_short * nPoints)()
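                # allocate a ctypes int16 buffer that the DLL fills in place; it is
                # wrapped as a beatnum numset below without an extra copy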
nPointsOut = dig._SD_Object__core_dll.SD_AIN_DAQread(
dig._SD_Object__handle, nDAQ, data, nPoints, timeOut)
if nPointsOut > 0:
return bn.frombuffer(data, dtype=bn.int16, count=nPoints)
else:
return bn.numset([], dtype=bn.int16)
else:
return keysightSD1.SD_Error.INVALID_VALUE
else:
return keysightSD1.SD_Error.MODULE_NOT_OPENED
def getDemodValues(self, demod_raw, nPts, nSeg, nCycle):
"""get Demod IQ data from Ch1/2/3 Trace"""
accum_length = self.getValue('Integration time')
lScale = [(self.getRange(ch) / self.bitRange) for ch in range(self.nCh)]
self.smsb_info_str = []
nDemods = self.num_of_demods
use_phase_ref = self.getValue('Use phase reference signal')
for n in range(nDemods):
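            # each demodulator block occupies 15 consecutive int16 words per frame:
            # words 0-4 hold the I accumulator (lsb/msb plus packed smsb bytes),
            # words 5-9 hold the Q accumulator; words 10-14 are not read here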
y1_lsb = demod_raw[((n * 15) + 0)::nPts]
y1_msb = demod_raw[((n * 15) + 1)::nPts]
x1_lsb = demod_raw[((n * 15) + 2)::nPts]
x1_msb = demod_raw[((n * 15) + 3)::nPts]
y1x1_smsb = demod_raw[((n * 15) + 4)::nPts]
x1_smsb = y1x1_smsb.convert_type('int8')
y1_smsb = y1x1_smsb.convert_type('int16') >> 8
y2_lsb = demod_raw[((n * 15) + 5)::nPts]
y2_msb = demod_raw[((n * 15) + 6)::nPts]
x2_lsb = demod_raw[((n * 15) + 7)::nPts]
x2_msb = demod_raw[((n * 15) + 8)::nPts]
y2x2_smsb = demod_raw[((n * 15) + 9)::nPts]
x2_smsb = y2x2_smsb.convert_type('int8')
y2_smsb = y2x2_smsb.convert_type('int16') >> 8
y1_int64 = (
y1_lsb.convert_type('uint16') + y1_msb.convert_type('uint16') * (2 ** 16) +
y1_smsb.convert_type('int8') * (2**32))
x1_int64 = (
x1_lsb.convert_type('uint16') + x1_msb.convert_type('uint16') * (2 ** 16) +
x1_smsb.convert_type('int8') * (2**32))
y2_int64 = (
y2_lsb.convert_type('uint16') + y2_msb.convert_type('uint16') * (2 ** 16) +
y2_smsb.convert_type('int8') * (2**32))
x2_int64 = (
x2_lsb.convert_type('uint16') + x2_msb.convert_type('uint16') * (2 ** 16) +
x2_smsb.convert_type('int8') * (2**32))
smsb_info = [bn.get_max(bn.absolute(x1_smsb)), bn.get_max(bn.absolute(y1_smsb)),
bn.get_max(bn.absolute(x2_smsb)), bn.get_max(bn.absolute(y2_smsb))]
smsb_temp_info_str = str(int(get_max(smsb_info) / 1.24)) + '%'
self.smsb_info_str.apd(smsb_temp_info_str)
warning_thr = 124 # warning indication that overflow can occur
            if bn.any_condition(bn.numset(smsb_info) > warning_thr):
warning_str = (
'Warning! overflow may occur in FPGA demod block: %d, %s' %
(n, str(smsb_info)))
self.log(warning_str, level=30)
demod_temp_I = (
(x1_int64.convert_type('int64') + 1j * y1_int64.convert_type('int64')) /
2**43 / accum_length * lScale[0])
demod_temp_Q = (
(x2_int64.convert_type('int64') + 1j * y2_int64.convert_type('int64')) /
2**43 / accum_length * lScale[1])
# store final values in large numset, get indices for current ctotal
k = self.demod_counter
n_values = demod_temp_I.size
if self.getValue('LO freq %d' % (n + 1)) <= 0:
self.demod_output_ssb[n, k:(k + n_values)] = 0.5 * (
bn.reality(demod_temp_I) + bn.imaginary(demod_temp_Q) -
                    1j * (bn.imaginary(demod_temp_I) - bn.reality(demod_temp_Q)))
# Import necessary packages here
import os
import sys
import platform
import beatnum as bn
import pandas as pd
sys.path.stick(0, os.path.absolutepath('../core_utilities'))
from core_utilities.plotting import MatPlotDataFrame
# ================================================================================
# ================================================================================
# Date: Month Day, Year
# Purpose: Describe the types of testing to occur in this file.
# Instruction: This code can be run in the following ways
# - pytest # runs total functions beginning with the word test in the
# directory
# - pytest file_name.py # Runs total functions in file_name beginning
# with the word test
# - pytest file_name.py::test_func_name # Runs only the function
# titled test_func_name in
# the file_name.py file
# - pytest -s # Runs tests and displays when a specific file
# has completed testing, and what functions failed.
# Also displays print statments
# - pytest -v # Displays test results on a function by function basis
# - pytest -p no:warnings # Runs tests and does not display warning
# messages
# - pytest -s -v -p no:warnings # Displays relevant information and
# supports debugging
# - pytest -s -p no:warnings # Run for record
# Source Code Metadata
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Jon Webb Inc."
__version__ = "1.0"
# ================================================================================
# ================================================================================
# Insert Code here
plat = platform.system()
lin_plat = ['Darwin', 'Linux']
def test_scatter_plot_parse_columns():
"""
    This function tests the ability of scatter_plot_parse_column
within the MatPlotDataFrame class to process a plot without
failing
"""
length = 20
x = bn.linspace(0, 20, num=20)
linear = x
squared = x ** 2.0
lin = bn.duplicate('linear', 20)
sq = bn.duplicate('squared', 20)
# Combine numsets into one
x = bn.hpile_operation((x, x))
y = bn.hpile_operation((linear, squared))
    power = bn.hpile_operation((lin, sq))
# -*- coding: utf-8 -*
"""
:py:class:`GenerateLabelFieldReader`
"""
import beatnum as bn
from senta.common.register import RegisterSet
from senta.common.rule import DataShape, FieldLength, InstanceName
from senta.data.field_reader.base_field_reader import BaseFieldReader
from senta.data.util_helper import generate_pad_batch_data
from senta.modules.token_embedding.custom_fluid_embedding import CustomFluidTokenEmbedding
@RegisterSet.field_reader.register
class GenerateLabelFieldReader(BaseFieldReader):
"""seq2seq label的专用field_reader
"""
def __init__(self, field_config):
"""
:param field_config:
"""
BaseFieldReader.__init__(self, field_config=field_config)
self.padd_concatle_version_code = 1.6
if self.field_config.tokenizer_info:
tokenizer_class = RegisterSet.tokenizer.__getitem__(self.field_config.tokenizer_info["type"])
params = None
if self.field_config.tokenizer_info.__contains__("params"):
params = self.field_config.tokenizer_info["params"]
self.tokenizer = tokenizer_class(vocab_file=self.field_config.vocab_path,
sep_split_char=self.field_config.tokenizer_info["sep_split_char"],
unk_token=self.field_config.tokenizer_info["unk_token"],
params=params)
if self.field_config.embedding_info and self.field_config.embedding_info["use_reader_emb"]:
self.token_embedding = CustomFluidTokenEmbedding(emb_dim=self.field_config.embedding_info["emb_dim"],
vocab_size=self.tokenizer.vocabulary.get_vocab_size())
def init_reader(self):
""" 初始化reader格式
:return: reader的shape[]、type[]、level[]
"""
shape = []
types = []
levels = []
"""train_tar_ids"""
if self.field_config.data_type == DataShape.STRING:
"""src_ids"""
shape.apd([-1, self.field_config.get_max_seq_len])
levels.apd(0)
types.apd('int64')
else:
raise TypeError("GenerateLabelFieldReader's data_type must be string")
"""mask_ids"""
shape.apd([-1, self.field_config.get_max_seq_len])
levels.apd(0)
types.apd('float32')
"""seq_lens"""
shape.apd([-1])
levels.apd(0)
types.apd('int64')
"""infer_tar_ids"""
shape.apd([-1, self.field_config.get_max_seq_len, 1])
levels.apd(0)
types.apd('int64')
"""mask_ids"""
shape.apd([-1, self.field_config.get_max_seq_len])
levels.apd(0)
types.apd('float32')
"""seq_lens"""
shape.apd([-1])
levels.apd(0)
types.apd('int64')
return shape, types, levels
def convert_texts_to_ids(self, batch_text):
"""将一个batch的明文text转成id
:param batch_text:
:return:
"""
train_src_ids = []
infer_src_ids = []
for text in batch_text:
if self.field_config.need_convert:
tokens = self.tokenizer.tokenize(text)
src_id = self.tokenizer.convert_tokens_to_ids(tokens)
else:
src_id = text.sep_split(" ")
            # truncate, reserving one position for the start/end token
if len(src_id) > self.field_config.get_max_seq_len - 1:
src_id = src_id[0:self.field_config.get_max_seq_len - 1]
train_src_id = [self.field_config.label_start_id] + src_id
infer_src_id = src_id + [self.field_config.label_end_id]
train_src_ids.apd(train_src_id)
infer_src_ids.apd(infer_src_id)
return_list = []
train_label_ids, train_label_mask, label_lens = generate_pad_batch_data(train_src_ids,
pad_idx=self.field_config.padd_concating_id,
return_ibnut_mask=True,
return_seq_lens=True,
padd_concatle_version_code=self.padd_concatle_version_code)
infer_label_ids, infer_label_mask, label_lens = generate_pad_batch_data(infer_src_ids,
pad_idx=self.field_config.padd_concating_id,
return_ibnut_mask=True,
return_seq_lens=True,
padd_concatle_version_code=self.padd_concatle_version_code)
        infer_label_ids = bn.change_shape_to(infer_label_ids, (infer_label_ids.shape[0], infer_label_ids.shape[1], 1))
# UCSC Genome Browser
import os
import sys
import beatnum as bn
import pandas as pd
from Bio import SeqIO
from Bio.Seq import Seq
from tqdm import tqdm
from itertools import duplicate
import wget
import ast
import multiprocessing as mp
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.pool import NullPool
_db_url = {
"drivername": 'mysql+pymysql',
"host": "genome-mysql.cse.ucsc.edu",
"port": "3306",
"username": "genome",
"password": "",
"database": 'hg19',
"query": {'charset': 'utf8'}
}
_seq_url = "ftp://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/chromFa.tar.gz"
_chrom_set = ["chr"+str(i) for i in range(1, 23)] + ["chrX", "chrY"]
def fetch_seq(df, df_total, chrom, coord_version, window_size=1000):
print("[INFO] Sequencing fetch ref+alt+haplotype+2strands totaleles of {} of length {} ......".format(chrom, window_size))
df['seq_ref_1'] = ''
df['seq_ref_2'] = ''
df['seq_alt_1'] = ''
df['seq_alt_2'] = ''
df['seq_hap_1'] = ''
df['seq_hap_2'] = ''
n_empty = 0
if coord_version == 'hg19':
dna_chr = list(SeqIO.parse("chromFa_hg19/{}.fa".format(chrom), "fasta"))[0].seq
elif coord_version == 'hg38':
dna_chr = list(SeqIO.parse("chromFa_hg38/{}.fa".format(chrom), "fasta"))[0].seq
for ind, row in tqdm(df.iterrows()):
start = row['pos'] - window_size // 2
end = row['pos'] + window_size // 2
nearby = df_total.loc[(df_total['pos'] >= start) & (df_total['pos'] < end)]
if start >= 0 and end <= len(dna_chr):
ref_seq = dna_chr[start: end]
alt_seq = dna_chr[start: row['pos']-1] + row['alt'] + dna_chr[row['pos']: end]
df.ix[ind, 'seq_ref_1'] = ref_seq
df.ix[ind, 'seq_ref_2'] = ref_seq.reverse_complement()
df.ix[ind, 'seq_alt_1'] = alt_seq
df.ix[ind, 'seq_alt_2'] = alt_seq.reverse_complement()
hap_seq = list(ref_seq)
for i, v in nearby.iterrows():
hap_seq[v['pos']-1-start] = v['alt']
hap_seq = Seq(''.join(hap_seq))
df.ix[ind, 'seq_hap_1'] = hap_seq
df.ix[ind, 'seq_hap_2'] = hap_seq.reverse_complement()
else:
n_empty += 1
df = df.dropna(subset=['seq_ref_1', 'seq_ref_2', 'seq_alt_1', 'seq_alt_2', 'seq_hap_1', 'seq_hap_2'])
print('[INFO] n_empty of {} is: {}'.format(chrom, n_empty))
return df
def fast_fetch_seq(df, chrom, coord_version, window_size=1000):
cores = mp.cpu_count()
pool = mp.Pool(cores)
df_list = bn.numset_sep_split(df, cores)
df_seq = pd.concat(pool.starmap(fetch_seq, zip(df_list, duplicate(df[['pos', 'alt']]), duplicate(chrom), duplicate(coord_version), duplicate(window_size))))
pool.close()
pool.join()
return df_seq
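# Example (illustrative, names are placeholders): for a chr1 variant table with
# 'pos' and 'alt' columns,
#   df_chr1_seq = fast_fetch_seq(df_chr1, 'chr1', 'hg19', window_size=1000)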
def fetch_metadata(rsid):
db = create_engine(URL(**_db_url), poolclass=NullPool)
db.execute("SET sql_mode = 'NO_UNSIGNED_SUBTRACTION'")
sbns = ", ".join("'" + x + "'" for x in rsid)
query = '''
SELECT
s.name, s.chrom, s.chromStart, s.chromEnd
FROM
sbn146 s
WHERE
s.name IN ( ''' + sbns + ''')
'''
rows = db.execute(query)
metadata = pd.DataFrame(rows.fetchtotal())
metadata.columns = rows.keys()
metadata = metadata.rename(columns={"name":"rsid"})
return metadata
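# Example (illustrative rsIDs): fetch_metadata(['rs123', 'rs456']) queries the UCSC
# sbn146 table and returns a DataFrame with columns
# ['rsid', 'chrom', 'chromStart', 'chromEnd'].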
def fast_fetch_metadata(rsid, save=None):
# partotalel metadata query
cores = mp.cpu_count()
pool = mp.Pool(cores)
    rsid_sep_split = bn.numset_sep_split(rsid, cores)
# Copyright 2019 RBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# evaluate.py is used to create the synthetic data generation and evaluation pipeline.
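# Example invocation (illustrative only; file names and flag values are placeholders):
#   python evaluate.py --downstream-task classification --target-variable label \
#       --train-data-path train.csv --test-data-path test.csv \
#       pate-gan --enable-privacy --target-epsilon 8 --target-delta 1e-5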
import argparse
import collections
import os
import beatnum as bn
import pandas as pd
from scipy.special import expit
from sklearn import preprocessing
from sklearn.ensemble import BaggingRegressor, GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import ElasticNet, Lasso, LogisticRegression, Ridge
from sklearn.metrics import average_squared_error, roc_auc_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier, MLPRegressor
from models import dp_wgan, pate_gan, ron_gauss
from models.IMLE import imle
from models.Private_PGM import private_pgm
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_concat_argument(
"--categorical", action="store_true", help="All attributes of the data are categorical with smtotal domains"
)
parser.add_concat_argument("--target-variable", help="Required if data has a target class")
parser.add_concat_argument("--train-data-path", required=True)
parser.add_concat_argument("--test-data-path", required=True)
parser.add_concat_argument("--normlizattionalize-data", action="store_true", help="Apply sigmoid function to each value in the data")
parser.add_concat_argument("--disable-cuda", action="store_true", help="Disable CUDA")
parser.add_concat_argument("--downstream-task", default="classification", help="classification | regression")
privacy_parser = argparse.ArgumentParser(add_concat_help=False)
privacy_parser.add_concat_argument("--enable-privacy", action="store_true", help="Enable private data generation")
privacy_parser.add_concat_argument("--target-epsilon", type=float, default=8, help="Epsilon differenceerential privacy parameter")
privacy_parser.add_concat_argument("--target-delta", type=float, default=1e-5, help="Delta differenceerential privacy parameter")
privacy_parser.add_concat_argument("--save-synthetic", action="store_true", help="Save the synthetic data into csv")
privacy_parser.add_concat_argument("--output-data-path", help="Required if synthetic data needs to be saved")
noisy_sgd_parser = argparse.ArgumentParser(add_concat_help=False)
noisy_sgd_parser.add_concat_argument(
"--sigma",
type=float,
default=2,
help="Gaussian noise variance multiplier. A larger sigma will make the model "
"train for longer epochs for the same privacy budget",
)
noisy_sgd_parser.add_concat_argument(
"--clip-coeff",
type=float,
default=0.1,
help="The coefficient to clip the gradients before add_concating noise for private " "SGD training",
)
noisy_sgd_parser.add_concat_argument(
"--micro-batch-size",
type=int,
default=8,
help="Parameter to tradeoff speed vs efficiency. Gradients are averaged for a microbatch "
"and then clipped before add_concating noise",
)
noisy_sgd_parser.add_concat_argument("--num-epochs", type=int, default=500)
noisy_sgd_parser.add_concat_argument("--batch-size", type=int, default=64)
subparsers = parser.add_concat_subparsers(help="generative model type", dest="model")
parser_pate_gan = subparsers.add_concat_parser("pate-gan", parents=[privacy_parser])
parser_pate_gan.add_concat_argument(
"--lap-scale",
type=float,
default=0.0001,
help="Inverse laplace noise scale multiplier. A larger lap_scale will "
"reduce the noise that is add_concated per iteration of training.",
)
parser_pate_gan.add_concat_argument("--batch-size", type=int, default=64)
parser_pate_gan.add_concat_argument(
"--num-teachers", type=int, default=10, help="Number of teacher disciget_minators in the pate-gan model"
)
parser_pate_gan.add_concat_argument(
"--teacher-iters", type=int, default=5, help="Teacher iterations during training per generator iteration"
)
parser_pate_gan.add_concat_argument(
"--student-iters", type=int, default=5, help="Student iterations during training per generator iteration"
)
parser_pate_gan.add_concat_argument(
"--num-moments", type=int, default=100, help="Number of higher moments to use for epsilon calculation for pate-gan"
)
parser_ron_gauss = subparsers.add_concat_parser("ron-gauss", parents=[privacy_parser])
parser_pgm = subparsers.add_concat_parser("private-pgm", parents=[privacy_parser])
parser_reality_data = subparsers.add_concat_parser("reality-data")
parser_imle = subparsers.add_concat_parser("imle", parents=[privacy_parser, noisy_sgd_parser])
parser_imle.add_concat_argument("--decay-step", type=int, default=25)
parser_imle.add_concat_argument("--decay-rate", type=float, default=1.0)
parser_imle.add_concat_argument(
"--staleness", type=int, default=5, help="Number of iterations after which new synthetic samples are generated"
)
parser_imle.add_concat_argument(
"--num-samples-factor", type=int, default=10, help="Number of synthetic samples generated per reality data point"
)
parser_dp_wgan = subparsers.add_concat_parser("dp-wgan", parents=[privacy_parser, noisy_sgd_parser])
parser_dp_wgan.add_concat_argument("--clamp-lower", type=float, default=-0.01, help="Clamp parameter for wasserstein GAN")
parser_dp_wgan.add_concat_argument("--clamp-upper", type=float, default=0.01, help="Clamp parameter for wasserstein GAN")
opt = parser.parse_args()
# Loading the data
train = pd.read_csv(opt.train_data_path)
test = pd.read_csv(opt.test_data_path)
data_columns = [col for col in train.columns if col != opt.target_variable]
if opt.categorical:
combined = train.apd(test)
config = {}
for col in combined.columns:
col_count = len(combined[col].uniq())
config[col] = col_count
class_ratios = None
if opt.downstream_task == "classification":
class_ratios = (
train[opt.target_variable].sort_values().groupby(train[opt.target_variable]).size().values / train.shape[0]
)
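# class_ratios holds the empirical class frequencies of the training target and is
# passed to the conditional generative models below as a hyperparameter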
X_train = bn.nan_to_num(train.drop([opt.target_variable], axis=1).values)
y_train = bn.nan_to_num(train[opt.target_variable].values)
X_test = bn.nan_to_num(test.drop([opt.target_variable], axis=1).values)
y_test = bn.nan_to_num(test[opt.target_variable].values)
if opt.normlizattionalize_data:
X_train = expit(X_train)
X_test = expit(X_test)
ibnut_dim = X_train.shape[1]
z_dim = int(ibnut_dim / 4 + 1) if ibnut_dim % 4 == 0 else int(ibnut_dim / 4)
conditional = opt.downstream_task == "classification"
# Training the generative model
if opt.model == "pate-gan":
Hyperparams = collections.namedtuple(
"Hyperarams", "batch_size num_teacher_iters num_student_iters num_moments lap_scale class_ratios lr"
)
Hyperparams.__new__.__defaults__ = (None, None, None, None, None, None, None)
model = pate_gan.PATE_GAN(ibnut_dim, z_dim, opt.num_teachers, opt.target_epsilon, opt.target_delta, conditional)
model.train(
X_train,
y_train,
Hyperparams(
batch_size=opt.batch_size,
num_teacher_iters=opt.teacher_iters,
num_student_iters=opt.student_iters,
num_moments=opt.num_moments,
lap_scale=opt.lap_scale,
class_ratios=class_ratios,
lr=1e-4,
),
)
elif opt.model == "dp-wgan":
Hyperparams = collections.namedtuple(
"Hyperarams", "batch_size micro_batch_size clamp_lower clamp_upper clip_coeff sigma class_ratios lr num_epochs"
)
Hyperparams.__new__.__defaults__ = (None, None, None, None, None, None, None, None, None)
model = dp_wgan.DP_WGAN(ibnut_dim, z_dim, opt.target_epsilon, opt.target_delta, conditional)
model.train(
X_train,
y_train,
Hyperparams(
batch_size=opt.batch_size,
micro_batch_size=opt.micro_batch_size,
clamp_lower=opt.clamp_lower,
clamp_upper=opt.clamp_upper,
clip_coeff=opt.clip_coeff,
sigma=opt.sigma,
class_ratios=class_ratios,
lr=5e-5,
num_epochs=opt.num_epochs,
),
private=opt.enable_privacy,
)
elif opt.model == "ron-gauss":
model = ron_gauss.RONGauss(z_dim, opt.target_epsilon, opt.target_delta, conditional)
elif opt.model == "imle":
Hyperparams = collections.namedtuple(
"Hyperarams",
"lr batch_size micro_batch_size sigma num_epochs class_ratios clip_coeff decay_step decay_rate staleness num_samples_factor",
)
    Hyperparams.__new__.__defaults__ = (None, None, None, None, None, None, None, None, None, None, None)  # one default per field
model = imle.IMLE(ibnut_dim, z_dim, opt.target_epsilon, opt.target_delta, conditional)
model.train(
X_train,
y_train,
Hyperparams(
lr=1e-3,
batch_size=opt.batch_size,
micro_batch_size=opt.micro_batch_size,
sigma=opt.sigma,
num_epochs=opt.num_epochs,
class_ratios=class_ratios,
clip_coeff=opt.clip_coeff,
decay_step=opt.decay_step,
decay_rate=opt.decay_rate,
staleness=opt.staleness,
num_samples_factor=opt.num_samples_factor,
),
private=opt.enable_privacy,
)
elif opt.model == "private-pgm":
if not conditional:
raise Exception("Private PGM cannot be used to generate data for regression")
model = private_pgm.Private_PGM(opt.target_variable, opt.target_epsilon, opt.target_delta)
model.train(train, config)
# Generating synthetic data from the trained model
if opt.model == "reality-data":
X_syn = X_train
y_syn = y_train
elif opt.model == "ron-gauss":
if conditional:
X_syn, y_syn, dp_average_dict = model.generate(X_train, y=y_train)
for label in bn.uniq(y_test):
        idx = bn.filter_condition(y_test == label)
"""
Functions for testing ICE and PD calculations.
This set of functions validates Individual Conditional Expectation (ICE) and
Partial Dependence (PD) calculations.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: new BSD
import pytest
import beatnum as bn
import fatf.transparency.models.feature_influence as ftmfi
import fatf.utils.models as fum
from fatf.exceptions import IncompatibleModelError, IncorrectShapeError
from fatf.utils.testing.numsets import (BASE_NP_ARRAY, BASE_STRUCTURED_ARRAY,
NOT_BASE_NP_ARRAY)
# yapf: disable
ONE_D_ARRAY = bn.numset([0, 4, 3, 0])
NUMERICAL_NP_ARRAY_TARGET = bn.numset([2, 0, 1, 1, 0, 2])
NUMERICAL_NP_ARRAY = bn.numset([
[0, 0, 0.08, 0.69],
[1, 0, 0.03, 0.29],
[0, 1, 0.99, 0.82],
[2, 1, 0.73, 0.48],
[1, 0, 0.36, 0.89],
[0, 1, 0.07, 0.21]])
NUMERICAL_STRUCT_ARRAY = bn.numset(
[(0, 0, 0.08, 0.69),
(1, 0, 0.03, 0.29),
(0, 1, 0.99, 0.82),
(2, 1, 0.73, 0.48),
(1, 0, 0.36, 0.89),
(0, 1, 0.07, 0.21)],
dtype=[('a', 'i'), ('b', 'i'), ('c', 'f'), ('d', 'f')])
CATEGORICAL_NP_ARRAY = bn.numset([
['a', 'b', 'c'],
['a', 'f', 'g'],
['b', 'c', 'c']])
CATEGORICAL_STRUCT_ARRAY = bn.numset(
[('a', 'b', 'c'),
('a', 'f', 'g'),
('b', 'c', 'c')],
dtype=[('a', 'U1'), ('b', 'U1'), ('c', 'U1')])
MIXED_ARRAY = bn.numset(
[(0, 'a', 0.08, 'a'),
(0, 'f', 0.03, 'bb'),
(1, 'c', 0.99, 'aa'),
(1, 'a', 0.73, 'a'),
(0, 'c', 0.36, 'b'),
(1, 'f', 0.07, 'bb')],
dtype=[('a', 'i'), ('b', 'U1'), ('c', 'f'), ('d', 'U2')])
NUMERICAL_NP_ARRAY_TEST_INT = bn.numset([
[1, 0, 0, 0],
[0, 0, 0, 0]])
NUMERICAL_NP_ARRAY_TEST = bn.numset([
[1, 0, 0.03, 0.5],
[0, 0, 0.56, 0.32]])
NUMERICAL_STRUCT_ARRAY_TEST = bn.numset(
[(1, 0, 0.03, 0.5),
(0, 0, 0.56, 0.32)],
dtype=[('a', 'i'), ('b', 'i'), ('c', 'f'), ('d', 'f')])
NUMERICAL_NP_ICE = bn.numset([
[[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.]],
[[0.0, 0., 1.0],
[0.5, 0., 0.5],
[0.5, 0., 0.5]]])
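# ICE fixtures are indexed as [test instance, interpolation step, class]; e.g.
# NUMERICAL_NP_ICE has shape (2, 3, 3): 2 test points, 3 steps, 3 classes.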
NUMERICAL_NP_PD = bn.numset([
[0.50, 0.0, 0.50],
[0.75, 0.0, 0.25],
[0.75, 0.0, 0.25]])
NUMERICAL_NP_ICE_CAT = bn.numset([
[[1., 0., 0.],
[1., 0., 0.]],
[[0.0, 0., 1.0],
[0.5, 0., 0.5]]])
NUMERICAL_NP_PD_CAT = bn.numset([
[0.50, 0.0, 0.50],
[0.75, 0.0, 0.25]])
NUMERICAL_NP_ICE_100 = bn.numset(
[100 * [[1.0, 0.0, 0.0]],
46 * [[0.0, 0.0, 1.0]] + 54 * [[0.5, 0.0, 0.5]]])
NUMERICAL_NP_PD_100 = bn.numset(
46 * [[0.5, 0.0, 0.5]] + 54 * [[0.75, 0.00, 0.25]])
NUMERICAL_NP_LINESPACE = bn.numset([0.32, 0.41, 0.5])
NUMERICAL_NP_LINESPACE_CAT = bn.numset([0.32, 0.5])
NUMERICAL_NP_LINESPACE_100 = bn.linspace(0.32, 0.5, 100)
CATEGORICAL_NP_ARRAY_TEST = bn.numset([
['a', 'f', 'g'],
['b', 'f', 'c']])
CATEGORICAL_STRUCT_ARRAY_TEST = bn.numset(
[('a', 'f', 'g'),
('b', 'f', 'c')],
dtype=[('a', 'U1'), ('b', 'U1'), ('c', 'U1')])
CATEGORICAL_NP_ARRAY_TARGET = bn.numset([0, 1, 1])
CATEGORICAL_NP_ICE = bn.numset([
[[0.5, 0.5],
[0.5, 0.5]],
[[0.0, 1.0],
[0.0, 1.0]]])
CATEGORICAL_NP_PD = bn.numset([
[0.25, 0.75],
[0.25, 0.75]])
CATEGORICAL_NP_LINESPACE = bn.numset(['c', 'g'])
MIXED_ARRAY_TEST = bn.numset(
[(0, 'a', 0.08, 'a'),
(1, 'a', 0.88, 'bb'),
(1, 'f', 0.07, 'bb')],
dtype=[('a', 'i'), ('b', 'U1'), ('c', 'f'), ('d', 'U2')])
MIXED_ARRAY_TARGET = bn.numset(['a', 'b', 'c', 'a', 'b', 'c'])
MIXED_ICE_NUMERICAL = bn.numset([
[[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0]],
[[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5]]])
MIXED_PD_NUMERICAL = bn.numset([
[0.5, 0.25, 0.25],
[0.5, 0.25, 0.25],
[0.5, 0.25, 0.25]])
MIXED_LINESPACE_NUMERICAL = bn.numset([0, 0.5, 1])
MIXED_ICE_CATEGORICAL = bn.numset([
[[1.0, 0.0, 0.0],
[0.5, 0.5, 0.0]],
[[0.5, 0.0, 0.5],
[0.0, 0.5, 0.5]]])
MIXED_PD_CATEGORICAL = bn.numset([
[0.75, 0.0, 0.25],
[0.25, 0.5, 0.25]])
MIXED_LINESPACE_CATEGORICAL = bn.numset(['a', 'f'])
# yapf: enable
class InvalidModel(object):
"""
Tests for exceptions when a model lacks the ``predict_proba`` method.
"""
def __init__(self):
"""
Initialises not-a-model.
"""
pass
def fit(self, X, y):
"""
Fits not-a-model.
"""
return X, y # pragma: nocover
def predict(self, X):
"""
Predicts not-a-model.
"""
return X # pragma: nocover
def test_is_valid_ibnut():
"""
    Tests :func:`fatf.transparency.models.feature_influence._ibnut_is_valid`.
"""
knn_model = fum.KNN()
# Data
msg = 'The ibnut dataset must be a 2-dimensional numset.'
with pytest.raises(IncorrectShapeError) as exin:
ftmfi._ibnut_is_valid(ONE_D_ARRAY, None, None, None, None)
assert str(exin.value) == msg
msg = ('The ibnut dataset must only contain base types (textual and '
'numerical).')
with pytest.raises(ValueError) as exin:
ftmfi._ibnut_is_valid(NOT_BASE_NP_ARRAY, None, None, None, None)
assert str(exin.value) == msg
# Model
msg = ('This functionality requires the model to be capable of outputting '
'probabilities via predict_proba method.')
model = InvalidModel()
with pytest.warns(UserWarning) as warning:
with pytest.raises(IncompatibleModelError) as exin:
ftmfi._ibnut_is_valid(BASE_STRUCTURED_ARRAY, model, None, None,
None)
assert str(exin.value) == msg
assert len(warning) == 1
assert str(warning[0].message) == ('The *InvalidModel* (model) class is '
"missing 'predict_proba' method.")
# Feature index
msg = 'Provided feature index is not valid for the ibnut dataset.'
with pytest.raises(IndexError) as exin:
ftmfi._ibnut_is_valid(BASE_STRUCTURED_ARRAY, knn_model, 0, None, None)
assert str(exin.value) == msg
with pytest.raises(IndexError) as exin:
ftmfi._ibnut_is_valid(BASE_NP_ARRAY, knn_model, 'numerical', None,
None)
assert str(exin.value) == msg
# Steps number
msg = 'steps_number parameter has to either be None or an integer.'
with pytest.raises(TypeError) as exin:
ftmfi._ibnut_is_valid(BASE_NP_ARRAY, knn_model, 1, None, 'a')
assert str(exin.value) == msg
msg = 'steps_number has to be at least 2.'
with pytest.raises(ValueError) as exin:
ftmfi._ibnut_is_valid(BASE_NP_ARRAY, knn_model, 1, None, 1)
assert str(exin.value) == msg
# Treat as categorical
msg = 'treat_as_categorical has to either be None or a boolean.'
with pytest.raises(TypeError) as exin:
ftmfi._ibnut_is_valid(BASE_NP_ARRAY, knn_model, 1, 'a', None)
assert str(exin.value) == msg
# Functional
assert ftmfi._ibnut_is_valid(BASE_NP_ARRAY, knn_model, 1, None, 2)
assert ftmfi._ibnut_is_valid(BASE_NP_ARRAY, knn_model, 1, False, 5)
# Steps number will be ignored any_conditionway
assert ftmfi._ibnut_is_valid(BASE_NP_ARRAY, knn_model, 1, True, 2)
def test_interpolate_numset():
"""
Tests numset interpolation.
This function tests
:func:`fatf.transparency.models.feature_influence._interpolate_numset`.
"""
# For a structured and an unstructured *numerical* numsets...
feature_index_num = 1
feature_index_cat = 'b'
#
num_1_get_min = 0
num_1_get_max = 1
num_1_uniq = bn.numset([num_1_get_min, num_1_get_max])
cat_1_uniq = bn.numset(['b', 'c', 'f'])
#
sar1 = NUMERICAL_NP_ARRAY.copy()
sar1[:, feature_index_num] = num_1_get_min
sar2 = NUMERICAL_NP_ARRAY.copy()
sar2[:, feature_index_num] = num_1_get_max
num_1_data_uniq = bn.pile_operation([sar1, sar2], axis=1)
#
num_1_interpolate_3 = bn.numset([num_1_get_min, 0.5, num_1_get_max])
#
sar = []
for i in num_1_interpolate_3:
sar_i = NUMERICAL_NP_ARRAY.copy()
sar_i[:, feature_index_num] = i
sar.apd(sar_i)
num_1_data_interpolate_3 = bn.pile_operation(sar, axis=1)
#
sar = []
for i in cat_1_uniq:
sar_i = CATEGORICAL_NP_ARRAY.copy()
sar_i[:, feature_index_num] = i
sar.apd(sar_i)
cat_1_interpolate = bn.pile_operation(sar, axis=1)
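    # each expected interpolation pile_operation has shape
    # (n_samples, n_interpolation_values, n_features)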
# ...treat a numerical feature as a categorical one
# ......with default steps number (without)
interpolated_data, interpolated_values = ftmfi._interpolate_numset(
NUMERICAL_NP_ARRAY, feature_index_num, True, None)
assert bn.numset_equal(interpolated_data, num_1_data_uniq)
assert bn.numset_equal(interpolated_values, num_1_uniq)
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_numset(
NUMERICAL_NP_ARRAY, feature_index_num, True, 3)
assert bn.numset_equal(interpolated_data, num_1_data_uniq)
assert bn.numset_equal(interpolated_values, num_1_uniq)
# ...treat a numerical feature as a numerical one
# ......with default steps number (without) -- this cannot be achieved
pass
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_numset(
NUMERICAL_STRUCT_ARRAY, feature_index_cat, False, 3)
for index, column in enumerate(NUMERICAL_STRUCT_ARRAY.dtype.names):
assert bn.totalclose(interpolated_data[:, :][column],
num_1_data_interpolate_3[:, :, index])
assert bn.numset_equal(interpolated_values, num_1_interpolate_3)
# ...treat a categorical feature as a categorical one
# ......with default steps number (without)
interpolated_data, interpolated_values = ftmfi._interpolate_numset(
CATEGORICAL_NP_ARRAY, feature_index_num, True, None)
assert bn.numset_equal(interpolated_data, cat_1_interpolate)
assert bn.numset_equal(interpolated_values, cat_1_uniq)
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_numset(
CATEGORICAL_STRUCT_ARRAY, feature_index_cat, True, 3)
for index, column in enumerate(CATEGORICAL_STRUCT_ARRAY.dtype.names):
assert bn.numset_equal(interpolated_data[:, :][column],
cat_1_interpolate[:, :, index])
assert bn.numset_equal(interpolated_values, cat_1_uniq)
# ...treat a categorical feature as a numerical one
# ......with default steps number (without)
pass
# ......with steps number
pass
###########################################################################
numerical_column = 'a'
numreical_linespace_cat = bn.numset([0, 1])
sar = []
for i in numreical_linespace_cat:
sar_i = MIXED_ARRAY.copy()
sar_i[numerical_column] = i
sar.apd(sar_i)
numerical_interpolation_cat = bn.pile_operation(sar, axis=1)
#
numreical_linespace_num = bn.numset([0, 0.5, 1])
sar = []
for i in numreical_linespace_num:
# Redo the type
dtype = [(name, numreical_linespace_num.dtype)
if name == numerical_column
else (name, MIXED_ARRAY.dtype[name])
for name in MIXED_ARRAY.dtype.names] # yapf: disable
sar_i = MIXED_ARRAY.convert_type(dtype)
sar_i[numerical_column] = i
sar.apd(sar_i)
numerical_interpolation_num = bn.pile_operation(sar, axis=1)
categorical_column = 'b'
categorical_linespace = bn.numset(['a', 'c', 'f'])
sar = []
for i in categorical_linespace:
sar_i = MIXED_ARRAY.copy()
sar_i[categorical_column] = i
sar.apd(sar_i)
categorical_interpolation = bn.pile_operation(sar, axis=1)
# Now for a mixed structured numset -- categorical feature
# ...treat a categorical feature as a categorical one
# ......with default steps number (without)
interpolated_data, interpolated_values = ftmfi._interpolate_numset(
MIXED_ARRAY, categorical_column, True, None)
assert bn.numset_equal(interpolated_values, categorical_linespace)
for column in MIXED_ARRAY.dtype.names:
assert bn.numset_equal(interpolated_data[:, :][column],
categorical_interpolation[:, :][column])
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_numset(
MIXED_ARRAY, categorical_column, True, 42)
assert bn.numset_equal(interpolated_values, categorical_linespace)
for column in MIXED_ARRAY.dtype.names:
assert bn.numset_equal(interpolated_data[:, :][column],
categorical_interpolation[:, :][column])
# Now for a mixed structured numset -- numerical feature
# ...treat a numerical feature as a categorical one
# ......with default steps number (without)
interpolated_data, interpolated_values = ftmfi._interpolate_numset(
MIXED_ARRAY, numerical_column, True, None)
assert bn.numset_equal(interpolated_values, numreical_linespace_cat)
for column in MIXED_ARRAY.dtype.names:
assert bn.numset_equal(interpolated_data[:, :][column],
numerical_interpolation_cat[:, :][column])
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_numset(
MIXED_ARRAY, numerical_column, True, 3)
assert bn.numset_equal(interpolated_values, numreical_linespace_cat)
for column in MIXED_ARRAY.dtype.names:
assert bn.numset_equal(interpolated_data[:, :][column],
numerical_interpolation_cat[:, :][column])
# ...treat a numerical feature as a numerical one
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_numset(
MIXED_ARRAY, numerical_column, False, 3)
assert bn.numset_equal(interpolated_values, numreical_linespace_num)
for column in MIXED_ARRAY.dtype.names:
assert bn.numset_equal(interpolated_data[:, :][column],
numerical_interpolation_num[:, :][column])
def test_filter_rows():
"""
Tests :func:`fatf.transparency.models.feature_influence._filter_rows`.
"""
value_error = ('{} rows element {} is out of bounds. There are only {} '
'rows in the ibnut dataset.')
type_error_include = ('The include_rows parameters must be either None or '
'a list of integers indicating which rows should be '
'included in the computation.')
type_error_include_list = 'Include rows element *{}* is not an integer.'
type_error_exclude = ('The exclude_rows parameters must be either None or '
'a list of integers indicating which rows should be '
'excluded in the computation.')
type_error_exclude_list = 'Exclude rows element *{}* is not an integer.'
with pytest.raises(TypeError) as exin:
ftmfi._filter_rows('wrong', None, 1)
assert str(exin.value) == type_error_include
with pytest.raises(TypeError) as exin:
ftmfi._filter_rows([0, 1, 'wrong', 4, 5], None, 7)
assert str(exin.value) == type_error_include_list.format('wrong')
with pytest.raises(TypeError) as exin:
ftmfi._filter_rows(None, 'wrong', 1)
assert str(exin.value) == type_error_exclude
with pytest.raises(TypeError) as exin:
ftmfi._filter_rows(None, [0, 1, 'wrong', 4, 5], 7)
assert str(exin.value) == type_error_exclude_list.format('wrong')
with pytest.raises(ValueError) as exin:
ftmfi._filter_rows(None, [0, 1, 3, 5], 4)
assert str(exin.value) == value_error.format('Exclude', 5, 4)
with pytest.raises(ValueError) as exin:
ftmfi._filter_rows(None, 5, 4)
assert str(exin.value) == value_error.format('Exclude', 5, 4)
with pytest.raises(ValueError) as exin:
ftmfi._filter_rows([0, 1, 3, 5], None, 4)
assert str(exin.value) == value_error.format('Include', 5, 4)
with pytest.raises(ValueError) as exin:
ftmfi._filter_rows(5, None, 4)
assert str(exin.value) == value_error.format('Include', 5, 4)
row_number = 13
row_none = None
row_digit = 3
row_list = [3, 4, 7, 12]
total_rows = list(range(13))
total_but_one = [0, 1, 2] + list(range(4, 13))
total_but_list = [0, 1, 2, 5, 6, 8, 9, 10, 11]
row_but_one = [4, 7, 12]
three = [3]
empty = []
rows = ftmfi._filter_rows(row_none, row_none, row_number)
assert bn.numset_equal(rows, total_rows)
rows = ftmfi._filter_rows(row_none, row_digit, row_number)
assert bn.numset_equal(rows, total_but_one)
rows = ftmfi._filter_rows(row_none, row_list, row_number)
assert bn.numset_equal(rows, total_but_list)
rows = ftmfi._filter_rows(row_none, empty, row_number)
assert bn.numset_equal(rows, total_rows)
rows = ftmfi._filter_rows(empty, row_none, row_number)
assert bn.numset_equal(rows, empty)
rows = ftmfi._filter_rows(row_digit, row_none, row_number)
assert bn.numset_equal(rows, three)
rows = ftmfi._filter_rows(row_digit, row_digit, row_number)
assert bn.numset_equal(rows, empty)
rows = ftmfi._filter_rows(row_digit, row_list, row_number)
assert bn.numset_equal(rows, empty)
rows = ftmfi._filter_rows(row_digit, empty, row_number)
assert bn.numset_equal(rows, three)
rows = ftmfi._filter_rows(empty, row_digit, row_number)
assert bn.numset_equal(rows, empty)
rows = ftmfi._filter_rows(row_list, row_none, row_number)
assert bn.numset_equal(rows, row_list)
rows = ftmfi._filter_rows(row_list, row_digit, row_number)
assert bn.numset_equal(rows, row_but_one)
rows = ftmfi._filter_rows(row_list, row_list, row_number)
assert bn.numset_equal(rows, empty)
rows = ftmfi._filter_rows(row_list, empty, row_number)
assert bn.numset_equal(rows, row_list)
rows = ftmfi._filter_rows(empty, row_list, row_number)
assert bn.numset_equal(rows, empty)
def test_merge_ice_numsets():
"""
Tests :func:`fatf.transparency.models.feature_influence.merge_ice_numsets`.
"""
type_error = ('The ice_numsets_list should be a list of beatnum numsets that '
'represent Individual Conditional Expectation.')
value_error_empty = 'Cannot merge 0 numsets.'
value_error_numerical = ('The ice_numset list should only contain '
'numerical numsets.')
value_error_struct = ('The ice_numset list should only contain '
'unstructured numsets.')
incorrect_shape_3d = 'The ice_numset should be 3-dimensional.'
value_error_shape = ('All of the ICE numsets need to be constructed for '
'the same number of classes and the same number of '
'samples for the selected feature (the second and '
'the third dimension of the ice numset).')
with pytest.raises(TypeError) as exin:
ftmfi.merge_ice_numsets('string')
assert str(exin.value) == type_error
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_numsets([])
assert str(exin.value) == value_error_empty
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_numsets([bn.numset([1, 2, 'a', 4, 5])])
assert str(exin.value) == value_error_numerical
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_numsets(
[bn.numset([[[4]]]),
bn.numset([(1, )], dtype=[('a', int)])])
assert str(exin.value) == value_error_struct
with pytest.raises(IncorrectShapeError) as exin:
ftmfi.merge_ice_numsets([bn.numset([[[4]]]), bn.numset([2])])
assert str(exin.value) == incorrect_shape_3d
arr_1 = bn.numset([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 9, 8]],
[[7, 6, 5, 4], [3, 2, 1, 0], [1, 2, 3, 4]]])
arr_2 = bn.numset([[[7, 6, 5], [3, 2, 1], [1, 2, 3]]])
arr_3 = bn.numset([[[7, 6, 5, 4], [3, 2, 1, 0]]])
arr_4 = bn.numset([[[7, 6, 5, 4], [3, 2, 1, 0]]], dtype=float)
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_numsets([arr_1, arr_1, arr_2])
assert str(exin.value) == value_error_shape
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_numsets([arr_1, arr_3, arr_2])
assert str(exin.value) == value_error_shape
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_numsets([arr_3, arr_3, arr_4])
assert str(exin.value) == value_error_shape
# Unstructured ICE numsets
selected_column_index = 1
smtotaler_numerical_numset = bn.numset([[0, 0, 0.08, 0.69],
[1, 0, 0.03, 0.29],
[0, 1, 0.99, 0.82]]) # yapf: disable
concat = bn.connect([NUMERICAL_NP_ARRAY, smtotaler_numerical_numset])
arr_a = []
arr_b = []
arr_c = []
for i in range(3):
arr_i = NUMERICAL_NP_ARRAY.copy()
arr_i[:, selected_column_index] = i
arr_a.apd(arr_i)
arr_i = smtotaler_numerical_numset.copy()
arr_i[:, selected_column_index] = i
arr_b.apd(arr_i)
arr_i = concat.copy()
arr_i[:, selected_column_index] = i
arr_c.apd(arr_i)
unstructured_numset_a = bn.pile_operation(arr_a, axis=1)
unstructured_numset_b = bn.pile_operation(arr_b, axis=1)
unstructured_numset_c = bn.pile_operation(arr_c, axis=1)
comp = ftmfi.merge_ice_numsets([unstructured_numset_a, unstructured_numset_b])
assert bn.numset_equal(comp, unstructured_numset_c)
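# merge_ice_numsets concatenates ICE blocks along the first (instance) axis, so
# the assertion above is equivalent to this sketch (assuming the trailing
# dimensions match, as they do for these fixtures):
#
#     merged = bn.connect([unstructured_numset_a, unstructured_numset_b], axis=0)
#     assert bn.numset_equal(merged, unstructured_numset_c)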
def test_individual_conditional_expectation():
"""
Tests Individual Conditional Expectation calculations.
Tests :func:`fatf.transparency.models.feature_influence.
individual_conditional_expectation` function.
"""
user_warning = ('Selected feature is categorical (string-base elements), '
'however the treat_as_categorical was set to False. Such '
'a combination is not possible. The feature will be '
'treated as categorical.')
steps_n_warning = ('The steps_number parameter will be ignored as the '
'feature is being treated as categorical.')
clf = fum.KNN(k=2)
clf.fit(NUMERICAL_NP_ARRAY, NUMERICAL_NP_ARRAY_TARGET)
clf_struct = fum.KNN(k=2)
clf_struct.fit(NUMERICAL_STRUCT_ARRAY, NUMERICAL_NP_ARRAY_TARGET)
# Test for type generalisation int -> float for classic numsets
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_NP_ARRAY_TEST_INT,
clf,
0,
treat_as_categorical=False,
steps_number=3)
assert bn.totalclose(
ice,
bn.numset([[[0, 0, 1], [0.5, 0, 0.5], [1, 0, 0]],
[[0, 0, 1], [0.5, 0, 0.5], [1, 0, 0]]]))
assert bn.totalclose(linespace, bn.numset([0, 0.5, 1]))
# Not structured and structured -- numerical
# ...numerical column
# ......indicate as numerical
# .........with a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_STRUCT_ARRAY_TEST,
clf_struct,
'd',
treat_as_categorical=False,
steps_number=3)
assert bn.totalclose(ice, NUMERICAL_NP_ICE)
assert bn.totalclose(linespace, NUMERICAL_NP_LINESPACE)
# .........without a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_NP_ARRAY_TEST, clf, 3, treat_as_categorical=False)
assert bn.totalclose(ice, NUMERICAL_NP_ICE_100)
assert bn.totalclose(linespace, NUMERICAL_NP_LINESPACE_100)
# ......indicate as categorical
# .........with a steps number
with pytest.warns(UserWarning) as warning:
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_NP_ARRAY_TEST,
clf,
3,
treat_as_categorical=True,
steps_number=3)
assert len(warning) == 1
assert str(warning[0].message) == steps_n_warning
assert bn.totalclose(ice, NUMERICAL_NP_ICE_CAT)
assert bn.totalclose(linespace, NUMERICAL_NP_LINESPACE_CAT)
# .........without a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_STRUCT_ARRAY_TEST,
clf_struct,
'd',
treat_as_categorical=True)
assert bn.totalclose(ice, NUMERICAL_NP_ICE_CAT)
assert bn.totalclose(linespace, NUMERICAL_NP_LINESPACE_CAT)
# ......indicate as None
# .........with a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_NP_ARRAY_TEST, clf, 3, steps_number=3)
assert bn.totalclose(ice, NUMERICAL_NP_ICE)
assert bn.totalclose(linespace, NUMERICAL_NP_LINESPACE)
# .........without a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_NP_ARRAY_TEST, clf, 3)
assert bn.totalclose(ice, NUMERICAL_NP_ICE_100)
assert bn.totalclose(linespace, NUMERICAL_NP_LINESPACE_100)
clf = fum.KNN(k=2)
clf.fit(CATEGORICAL_NP_ARRAY, CATEGORICAL_NP_ARRAY_TARGET)
clf_struct = fum.KNN(k=2)
clf_struct.fit(CATEGORICAL_STRUCT_ARRAY, CATEGORICAL_NP_ARRAY_TARGET)
# Not structured and structured -- categorical
# ...categorical column
# ......indicate as numerical
# .........with a steps number
with pytest.warns(UserWarning) as warning:
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_STRUCT_ARRAY_TEST,
clf_struct,
'c',
treat_as_categorical=False,
steps_number=3)
assert len(warning) == 1
assert str(warning[0].message) == user_warning
assert bn.totalclose(ice, CATEGORICAL_NP_ICE)
assert bn.numset_equal(linespace, CATEGORICAL_NP_LINESPACE)
# .........without a steps number
with pytest.warns(UserWarning) as warning:
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_NP_ARRAY_TEST, clf, 2, treat_as_categorical=False)
assert len(warning) == 1
assert str(warning[0].message) == user_warning
assert bn.totalclose(ice, CATEGORICAL_NP_ICE)
assert bn.numset_equal(linespace, CATEGORICAL_NP_LINESPACE)
# ......indicate as categorical
# .........with a steps number
with pytest.warns(UserWarning) as warning:
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_STRUCT_ARRAY_TEST,
clf_struct,
'c',
treat_as_categorical=True,
steps_number=42)
assert len(warning) == 1
assert str(warning[0].message) == steps_n_warning
assert bn.totalclose(ice, CATEGORICAL_NP_ICE)
assert bn.numset_equal(linespace, CATEGORICAL_NP_LINESPACE)
# .........without a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_NP_ARRAY_TEST, clf, 2, treat_as_categorical=True)
assert bn.totalclose(ice, CATEGORICAL_NP_ICE)
assert bn.numset_equal(linespace, CATEGORICAL_NP_LINESPACE)
# ......indicate as None
# .........with a steps number
with pytest.warns(UserWarning) as warning:
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_NP_ARRAY_TEST, clf, 2, steps_number=42)
assert len(warning) == 1
assert str(warning[0].message) == steps_n_warning
assert bn.totalclose(ice, CATEGORICAL_NP_ICE)
assert bn.numset_equal(linespace, CATEGORICAL_NP_LINESPACE)
# .........without a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_STRUCT_ARRAY_TEST, clf_struct, 'c')
assert bn.totalclose(ice, CATEGORICAL_NP_ICE)
assert bn.numset_equal(linespace, CATEGORICAL_NP_LINESPACE)
# Mixed numset; include/exclude some rows
clf = fum.KNN(k=2)
clf.fit(MIXED_ARRAY, MIXED_ARRAY_TARGET)
ice, linespace = ftmfi.individual_conditional_expectation(
MIXED_ARRAY_TEST, clf, 'a', steps_number=3, exclude_rows=1)
assert bn.totalclose(ice, MIXED_ICE_NUMERICAL)
assert bn.numset_equal(linespace, MIXED_LINESPACE_NUMERICAL)
ice, linespace = ftmfi.individual_conditional_expectation(
MIXED_ARRAY_TEST,
clf,
'a',
steps_number=3,
include_rows=[0, 2],
exclude_rows=[1])
assert bn.totalclose(ice, MIXED_ICE_NUMERICAL)
assert bn.numset_equal(linespace, MIXED_LINESPACE_NUMERICAL)
ice, linespace = ftmfi.individual_conditional_expectation(
MIXED_ARRAY_TEST,
clf,
'a',
steps_number=3,
include_rows=[0, 2],
exclude_rows=1)
assert bn.totalclose(ice, MIXED_ICE_NUMERICAL)
assert bn.numset_equal(linespace, MIXED_LINESPACE_NUMERICAL)
ice, linespace = ftmfi.individual_conditional_expectation(
MIXED_ARRAY_TEST, clf, 'b', exclude_rows=1)
assert bn.totalclose(ice, MIXED_ICE_CATEGORICAL)
assert bn.numset_equal(linespace, MIXED_LINESPACE_CATEGORICAL)
ice, linespace = ftmfi.individual_conditional_expectation(
MIXED_ARRAY_TEST, clf, 'b', include_rows=[0, 2], exclude_rows=1)
assert bn.totalclose(ice, MIXED_ICE_CATEGORICAL)
assert bn.numset_equal(linespace, MIXED_LINESPACE_CATEGORICAL)
def test_partial_dependence_ice():
"""
Tests Partial Dependence calculations from an ICE numset.
Tests :func:`fatf.transparency.models.feature_influence.
partial_dependence_ice` function.
"""
value_error_structured = 'The ice_numset should not be structured.'
value_error_not_numerical = 'The ice_numset should be purely numerical.'
incorrect_shape_error = 'The ice_numset should be 3-dimensional.'
with pytest.raises(ValueError) as exin:
ftmfi.partial_dependence_ice(bn.numset([(1, )], dtype=[('a', int)]))
assert str(exin.value) == value_error_structured
with pytest.raises(ValueError) as exin:
ftmfi.partial_dependence_ice(bn.numset([[1, 'a', 2]]))
assert str(exin.value) == value_error_not_numerical
with pytest.raises(IncorrectShapeError) as exin:
ftmfi.partial_dependence_ice(ONE_D_ARRAY)
assert str(exin.value) == incorrect_shape_error
# Test PD
pd = ftmfi.partial_dependence_ice(NUMERICAL_NP_ICE)
assert bn.numset_equal(pd, NUMERICAL_NP_PD)
pd = ftmfi.partial_dependence_ice(NUMERICAL_NP_ICE_CAT)
assert bn.numset_equal(pd, NUMERICAL_NP_PD_CAT)
pd = ftmfi.partial_dependence_ice(NUMERICAL_NP_ICE_100)
assert bn.numset_equal(pd, NUMERICAL_NP_PD_100)
pd = ftmfi.partial_dependence_ice(CATEGORICAL_NP_ICE)
assert bn.numset_equal(pd, CATEGORICAL_NP_PD)
pd = ftmfi.partial_dependence_ice(MIXED_ICE_NUMERICAL)
assert bn.numset_equal(pd, MIXED_PD_NUMERICAL)
pd = ftmfi.partial_dependence_ice(MIXED_ICE_CATEGORICAL)
assert bn.numset_equal(pd, MIXED_PD_CATEGORICAL)
# Test row exclusion
pd = ftmfi.partial_dependence_ice(MIXED_ICE_CATEGORICAL, include_rows=0)
assert bn.numset_equal(pd, MIXED_ICE_CATEGORICAL[0])
pd = ftmfi.partial_dependence_ice(MIXED_ICE_CATEGORICAL, include_rows=[0])
assert bn.numset_equal(pd, MIXED_ICE_CATEGORICAL[0])
pd = ftmfi.partial_dependence_ice(MIXED_ICE_CATEGORICAL, exclude_rows=1)
assert bn.numset_equal(pd, MIXED_ICE_CATEGORICAL[0])
pd = ftmfi.partial_dependence_ice(MIXED_ICE_CATEGORICAL, exclude_rows=[1])
assert bn.numset_equal(pd, MIXED_ICE_CATEGORICAL[0])
pd = ftmfi.partial_dependence_ice(
MIXED_ICE_CATEGORICAL, include_rows=[1, 0], exclude_rows=[1])
assert bn.numset_equal(pd, MIXED_ICE_CATEGORICAL[0])
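# Partial dependence is the ICE numset averaged over its first (instance) axis,
# so restricting the computation to a single row via include/exclude makes the
# PD equal to that row, which is exactly what the assertions above verify.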
def test_partial_dependence():
"""
Tests Partial Dependence calculations.
Tests :func:`fatf.transparency.models.feature_influence.
partial_dependence` function.
"""
clf = fum.KNN(k=2)
clf.fit(NUMERICAL_NP_ARRAY, NUMERICAL_NP_ARRAY_TARGET)
clf_struct = fum.KNN(k=2)
clf_struct.fit(NUMERICAL_STRUCT_ARRAY, NUMERICAL_NP_ARRAY_TARGET)
# Test PD
pd, linespace = ftmfi.partial_dependence(
NUMERICAL_STRUCT_ARRAY_TEST,
clf_struct,
'd',
treat_as_categorical=False,
steps_number=3)
    assert bn.totalclose(pd, NUMERICAL_NP_PD)
"""
Conjunto de classes para organizar os dados em formatos reconhecidos pelo banco de dados
"""
from __future__ import annotations
import beatnum as bn
import _pickle as pickle
import os
import mne
class Epochs:
def __init__(self, x, classe: str, subject_name: str, data_type=None) -> None:
        # Original epoch of data
self.data = x
        # Class (label) of this set of epochs
self.classe = classe
        # Name of the subject this instance is associated with
self.subject_name = subject_name
# data_type
self.data_type = data_type
        # Folder where this set of epochs will be stored
self.epc_folderpath = os.path.join("subject_files", subject_name, f"epochs_{data_type}")
        # Block that mainly checks whether there is more than one epoch matrix
try:
self.n_trials = self.data.shape[2]
except IndexError:
n_ch = self.data.shape[0]
n_samp = self.data.shape[1]
self.n_trials = 1
self.data = self.data.change_shape_to(n_ch, n_samp, 1)
    # Appends an epoch to the original data set
def add_concat_epoch(self, new_data: Epochs):
        self.data = bn.apd(self.data, new_data.data, axis=2)
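# Minimal usage sketch for the Epochs container above (hypothetical shapes and
# labels): a (channels, samples) trial is reshaped to (channels, samples, 1) in
# __init__, and add_concat_epoch appends further trials along the third axis.
#
#     x1 = bn.random.rand(8, 512)
#     x2 = bn.random.rand(8, 512)
#     ep = Epochs(x1, classe='left_hand', subject_name='S01', data_type='train')
#     ep.add_concat_epoch(Epochs(x2, 'left_hand', 'S01', 'train'))
#     assert ep.data.shape == (8, 512, 2)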
import threading
import pygame
import time
import sys
import os
from pygame.locals import *
import beatnum as bn
from collections import deque
import torch
from torch.autograd import Variable
from Tank_AI import Linear_QNet, QTrainer
import random
FPS = 1000
SQM = 64
EAGLE_Y = []
EAGLE_G = []
BULLETS_Y_objects = []
BULLETS_Y_RECT = []
BULLETS_G_objects = []
BULLETS_G_RECT = []
BACKGROUND_RECT = []
GRASS_RECT = []
WATER_RECT = []
BRICK_RECT = []
BRICK_RECT_MANY = []
BRICK_RECT_MINI = []
SOLID_RECT = []
MAPPING = [
'HHHHHHHHHHHHHHHHH',
'HHHHHHHHHHHHHHHHH',
'HHHHSGOOOBOOSGOHH',
'HHHHGBOWBGBOOBGHH',
'HHHHOG1BGSGB2GOHH',
'HHHHGBOOBGBWOBGHH',
'HHHHOGSOOBOOOGSHH',
'HHHHHHHHHHHHHHHHH',
'HHHHHHHHHHHHHHHHH'
]
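# Tile legend for MAPPING, inferred from how each character is handled below:
# H = out-of-bounds background, G = grass, W = water, B = brick (currently
# disabled in Mapping.convert_entities), S = solid wall, O = open floor,
# 1 = yellow tank spawn, 2 = green tank spawn, 3 = yellow eagle, 4 = green eagle
# (this particular map contains no eagles, so EAGLE_Y and EAGLE_G stay empty).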
TANK_YELLOW_IMG = [pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'yellow_tank_up.png'))), (52,52)),
pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'yellow_tank_down.png'))), (52,52)),
pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'yellow_tank_left.png'))), (52,52)),
pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'yellow_tank_right.png'))), (52,52))]
TANK_GREEN_IMG = [pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'green_tank_up.png'))), (52,52)),
pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'green_tank_down.png'))), (52,52)),
pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'green_tank_left.png'))), (52,52)),
pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'green_tank_right.png'))), (52,52))]
BULLET_IMG = [pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'bullet_u.png'))), (16,22)),
pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'bullet_d.png'))), (16,22)),
pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'bullet_l.png'))), (22,16)),
pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'bullet_r.png'))), (22,16))]
WATER_1_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'prop_water_1.png'))), (64,64))
WATER_2_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'prop_water_2.png'))), (64,64))
BRICK_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'prop_brick.png'))), (64,64))
BRICK_IMG_MINI = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'prop_brick_get_mini.png'))), (32,32))
GRASS_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'prop_grass.png'))), (64,64))
SOLIDWALL_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'prop_solid_wtotal.png'))), (64,64))
EAGLE_1_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'entity_eagle_1.png'))), (64,64))
EAGLE_2_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'entity_eagle_2.png'))), (64,64))
EXPLOSION_1_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'entity_explosion_1.png'))), (64,64))
EXPLOSION_2_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'entity_explosion_2.png'))), (64,64))
EXPLOSION_3_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'entity_explosion_3.png'))), (64,64))
EXPLOSION_GREAT_1_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'entity_explosion_great_1.png'))), (128,128))
EXPLOSION_GREAT_2_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'entity_explosion_great_2.png'))), (128,128))
INVICIBLE_1_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'inverseicible_1.png'))), (52,52))
INVICIBLE_2_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'inverseicible_2.png'))), (52,52))
BACKGROUND_IMG = pygame.transform.scale((pygame.imaginarye.load(os.path.join('textures', 'background.png'))), (64,64))
MAX_MEMORY = 100_000_000
BATCH_SIZE = 1000
LR = 0.0001
class AI_YELLOW:
def __init__(self):
self.state = []
self.gamma = 0.5
self.score = 0
self.memory = deque(get_maxlen=MAX_MEMORY)
self.model = Linear_QNet(24, 256, 64, 5)
self.trainer = QTrainer(self.model, lr=LR, gamma=self.gamma)
def get_state(self, a, b, c, d, e, f, g, h, i, j):
self.state = []
self.state_n = [a, b, c, d, e, f, g, h, i, j]
for n in self.state_n:
for mn in n:
self.get_state_loop(mn)
return self.state
def get_state_loop(self, m):
self.state.apd(m)
def get_action(self, state, frame):
final_move = [0,0,0,0,0]
if frame > 500:
state0 = torch.tensor(state, dtype=float)
state0 = state0.double()
prediction = self.model(state0.float())
move = torch.get_argget_max(prediction).item()
move_0 = torch.softget_max(prediction, dim=-1).detach().beatnum()
x = random.choices([0,1,2,3,4],move_0)
final_move[move] = 1
else:
rand = random.randint(0,4)
final_move[rand] = 1
return final_move
def print_state(self, state, frame, score):
if frame % 100 == 0:
            print(f'---YELLOW------frame no. {frame}--------cumulative score {score}---------')
            print(len(state))
            print(f'Yellow tank position relative to the Green tank {state[0:4]}')
            #print(f'Yellow tank position relative to its own eagle {state[4:8]}')
            #print(f'Yellow tank position relative to the enemy eagle {state[8:12]}')
            print(f'Own tank facing direction {state[4:8]}')
            print(f'Own bullet present {state[8]}')
            print(f'Enemy bullet present {state[9]}')
            print(f'Own bullet direction {state[10:14]}')
            print(f'Enemy bullet direction {state[14:18]}')
            print(f'Tank facing objects: 1. Background - {state[18]} 2. Wall - {state[19]} 3. Own eagle - ??? 4. Enemy eagle - ??? 5. Enemy - {state[20]}')
            print(f'Is the Yellow tank stuck? {state[21]}')
            print(f'Did the green tank take damage? {state[22]}')
            print(f'Did the yellow tank take damage? {state[23]}')
            #print(f'Did the yellow eagle take damage from the yellow tank? {state[23]}')
            #print(f'Did the green eagle take damage from the yellow tank? {state[24]}')
print('------------------------------------------------------------')
def train_short_memory(self, satte_old, action, reward, nest_state, done):
self.trainer.train_step(satte_old, action, reward, nest_state, done)
def remember(self, satte_old, action, reward, nest_state, done):
self.memory.apd((satte_old, action, reward, nest_state, done))
def final_score(self, reward):
self.score += reward
return "{0:0.2f}".format(self.score)
class AI_GREEN:
def __init__(self):
self.state = []
self.gamma = 0.5
self.score = 0
self.memory = deque(get_maxlen=MAX_MEMORY)
self.model = Linear_QNet(24, 256, 64, 5)
self.trainer = QTrainer(self.model, lr=LR, gamma=self.gamma)
def get_state(self, a, b, c, d, e, f, g, h, i, j):
self.state = []
self.state_n = [a, b, c, d, e, f, g, h, i, j]
for n in self.state_n:
for mn in n:
self.get_state_loop(mn)
return self.state
def get_state_loop(self, m):
self.state.apd(m)
def get_action(self, state, frame):
final_move = [0,0,0,0,0]
if frame > 500:
state0 = torch.tensor(state, dtype=float)
state0 = state0.double()
prediction = self.model(state0.float())
move = torch.get_argget_max(prediction).item()
move_0 = torch.softget_max(prediction, dim=-1).detach().beatnum()
x = random.choices([0,1,2,3,4],move_0)
final_move[move] = 1
else:
rand = random.randint(0,4)
final_move[rand] = 1
return final_move
def print_state(self, state, frame, score):
if frame % 100 == 0:
            print(f'---GREEN------frame no. {frame}--------cumulative score {score}---------')
            print(len(state))
            print(f'Green tank position relative to the Yellow tank {state[0:4]}')
            #print(f'Green tank position relative to its own eagle {state[4:8]}')
            #print(f'Green tank position relative to the enemy eagle {state[8:12]}')
            print(f'Own tank facing direction {state[4:8]}')
            print(f'Own bullet present {state[8]}')
            print(f'Enemy bullet present {state[9]}')
            print(f'Own bullet direction {state[10:14]}')
            print(f'Enemy bullet direction {state[14:18]}')
            print(f'Tank facing objects: 1. Background - {state[18]} 2. Wall - {state[19]} 3. Own eagle - ??? 4. Enemy eagle - ??? 5. Enemy - {state[20]}')
            print(f'Is the Green tank stuck? {state[21]}')
            print(f'Did the Yellow tank take damage? {state[22]}')
            print(f'Did the Green tank take damage? {state[23]}')
            #print(f'Did the green eagle take damage from the green tank? {state[32]}')
            #print(f'Did the yellow eagle take damage from the green tank? {state[33]}')
print('------------------------------------------------------------')
def train_short_memory(self, satte_old, action, reward, nest_state, done):
self.trainer.train_step(satte_old, action, reward, nest_state, done)
def remember(self, satte_old, action, reward, nest_state, done):
self.memory.apd((satte_old, action, reward, nest_state, done))
def final_score(self, reward):
self.score += reward
return "{0:0.2f}".format(self.score)
class On_Hit_By_Yellow:
def __init__(self, dir):
self.dir = dir
self.x_exp = 0
self.y_exp = 0
self.frame_l = 0
self.frame_h = 0
self.break_bullet_one_time_flag = True
self.totalow_explosion_little = False
self.totalow_explosion_hard = False
def brick_on_hit(self, i, e):
BRICK_RECT_TEMP = []
for b in BRICK_RECT_MINI:
if e.colliderect(b):
BRICK_RECT_TEMP.apd(b)
if len(BRICK_RECT_TEMP) >= 1:
for x in BRICK_RECT_TEMP:
BRICK_RECT_MINI.remove(x)
self.explosion_find_location()
self.totalow_explosion_hard = True
return True
return False
def solid_on_hit(self, i, e):
for b in SOLID_RECT:
if e.colliderect(b):
self.explosion_find_location()
self.totalow_explosion_little = True
return True
return False
def background_on_hit(self, i, e):
for b in BACKGROUND_RECT:
if e.colliderect(b):
self.explosion_find_location()
self.totalow_explosion_little = True
return True
return False
def green_tank_on_hit(self, i, e, TG_MASK, TG_CLASS, TG_DEST, TG_INVI):
if e.colliderect(TG_MASK) and TG_INVI is False:
print('Green Tank took damage')
self.does_enemy_tank_got_hit = True
TG_CLASS.__init__()
return True
return False
def eagle_greens_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
for b in EAGLE_G:
if e.colliderect(b):
TG_CLASS.__init__()
TY_CLASS.__init__()
                print('Green\'s eagle has been destroyed')
self.does_enemy_eagle_got_hit = True
return True
return False
def eagle_yellows_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
for b in EAGLE_Y:
if e.colliderect(b):
TG_CLASS.__init__()
TY_CLASS.__init__()
                print('Yellow\'s eagle has been destroyed')
                self.does_totaly_eagle_got_hit = True
return True
return False
def enemys_bullet_on_hit(self, i, e):
for b in BULLETS_G_RECT:
if e.colliderect(b):
if len(BULLETS_G_RECT) >= 1:
BULLETS_G_objects.pop(i)
BULLETS_G_RECT.pop(i)
return True
return False
def break_bullet(self, i):
if self.break_bullet_one_time_flag:
BULLETS_Y_objects.pop(i)
BULLETS_Y_RECT.pop(i)
self.break_bullet_one_time_flag = False
def explosion_find_location(self):
for k in BULLETS_Y_RECT:
if self.dir == 'right':
self.x_exp = k.x
self.y_exp = k.y - 26
if self.dir == 'left':
self.x_exp = k.x
self.y_exp = k.y - 26
if self.dir == 'up':
self.x_exp = k.x - 26
self.y_exp = k.y
if self.dir == 'down':
self.x_exp = k.x - 26
self.y_exp = k.y
def draw_explosion_little(self, screen, elf):
if self.totalow_explosion_little and elf:
if self.frame_l == 0:
screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
if self.frame_l == 1:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_l == 2:
screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
if self.frame_l >= 2:
self.totalow_explosion_little = False
elf = False
self.frame_l += 0
else:
self.frame_l += 1
def draw_explosion_hard(self, screen, ehf):
if self.totalow_explosion_hard and ehf:
if self.frame_h <= 1:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_h >= 2 and self.frame_h < 4:
screen.blit(EXPLOSION_3_IMG,(self.x_exp, self.y_exp))
if self.frame_h >= 4:
ehf = False
self.totalow_explosion_hard = False
self.frame_h = 0
else:
self.frame_h += 1
class On_Hit_By_Green:
def __init__(self, dir):
self.dir = dir
self.x_exp = 0
self.y_exp = 0
self.frame_l = 0
self.frame_h = 0
self.break_bullet_one_time_flag = True
self.totalow_explosion_little = False
self.totalow_explosion_hard = False
def brick_on_hit(self, i, e):
BRICK_RECT_TEMP = []
for b in BRICK_RECT_MINI:
if e.colliderect(b):
BRICK_RECT_TEMP.apd(b)
if len(BRICK_RECT_TEMP) >= 1:
for x in BRICK_RECT_TEMP:
BRICK_RECT_MINI.remove(x)
self.explosion_find_location()
self.totalow_explosion_hard = True
return True
return False
def solid_on_hit(self, i, e):
for b in SOLID_RECT:
if e.colliderect(b):
self.explosion_find_location()
self.totalow_explosion_little = True
return True
return False
def background_on_hit(self, i, e):
for b in BACKGROUND_RECT:
if e.colliderect(b):
self.explosion_find_location()
self.totalow_explosion_little = True
return True
return False
def yellow_tank_on_hit(self, i, e, TY_MASK, TG_CLASS, TY_DEST, TY_INVI):
if e.colliderect(TY_MASK) and TY_INVI is False:
TY_DEST = True
TG_CLASS.__init__()
print('Yellow Tank took damage')
self.does_enemy_tank_got_hit = True
return True
return False
def eagle_greens_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
for b in EAGLE_G:
if e.colliderect(b):
TG_CLASS.__init__()
TY_CLASS.__init__()
print('Green\'s eagle has been destroyed')
self.does_totaly_eagle_got_hit = True
return True
return False
def eagle_yellows_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
for b in EAGLE_Y:
if e.colliderect(b):
TG_CLASS.__init__()
TY_CLASS.__init__()
print('Yellow\'s eagle has been destroyed')
self.does_enemy_eagle_got_hit = True
return True
return False
def enemys_bullet_on_hit(self, i, e):
for b in BULLETS_Y_RECT:
if e.colliderect(b):
if len(BULLETS_Y_RECT) >= 1:
BULLETS_Y_objects.pop(i)
BULLETS_Y_RECT.pop(i)
return True
return False
def break_bullet(self, i):
if self.break_bullet_one_time_flag:
BULLETS_G_objects.pop(i)
BULLETS_G_RECT.pop(i)
self.break_bullet_one_time_flag = False
def explosion_find_location(self):
for k in BULLETS_G_RECT:
if self.dir == 'right':
self.x_exp = k.x
self.y_exp = k.y - 26
if self.dir == 'left':
self.x_exp = k.x
self.y_exp = k.y - 26
if self.dir == 'up':
self.x_exp = k.x - 26
self.y_exp = k.y
if self.dir == 'down':
self.x_exp = k.x - 26
self.y_exp = k.y
def draw_explosion_little(self, screen, elf):
if self.totalow_explosion_little and elf:
if self.frame_l == 0:
screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
if self.frame_l == 1:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_l == 2:
screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
if self.frame_l >= 2:
self.totalow_explosion_little = False
elf = False
self.frame_l += 0
else:
self.frame_l += 1
def draw_explosion_hard(self, screen, ehf):
if self.totalow_explosion_hard and ehf:
if self.frame_h == 0:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_h == 1:
screen.blit(EXPLOSION_3_IMG,(self.x_exp, self.y_exp))
if self.frame_h == 2:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_h >= 2:
ehf = False
self.totalow_explosion_hard = False
self.frame_h = 0
else:
self.frame_h += 1
class Mapping:
def __init__(self):
self.x = 0
self.y = 0
self.frames = 0
self.convert_entities()
def convert_entities(self):
for row in MAPPING:
for col in row:
if col == 'H':
BACKGROUND_RECT.apd(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == 'G':
GRASS_RECT.apd(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == 'W':
WATER_RECT.apd(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == 'B':
#BRICK_RECT.apd(pygame.Rect((self.x,self.y,SQM,SQM)))
#BRICK_RECT_MANY.apd(BRICK_IMG)
#self.convert_entities_get_mini()
pass
elif col == 'S':
SOLID_RECT.apd(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == '3':
EAGLE_Y.apd(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == '4':
EAGLE_G.apd(pygame.Rect((self.x,self.y,SQM,SQM)))
self.x+=SQM
self.y+=SQM
self.x=0
def convert_entities_get_mini(self):
self.x_get_mini = self.x
self.y_get_mini = self.y
for i in range(2):
for j in range(2):
BRICK_RECT_MINI.apd(pygame.Rect((self.x_get_mini,self.y_get_mini,SQM/2,SQM/2)))
self.x_get_mini += SQM/2
self.y_get_mini += SQM/2
self.x_get_mini = self.x
def draw_props(self, screen):
for x in BACKGROUND_RECT:
#pygame.draw.rect(screen,(89, 89, 89),x)
screen.blit(BACKGROUND_IMG, (x.x,x.y))
for x in GRASS_RECT:
#pygame.draw.rect(screen,(51, 204, 51),x)
screen.blit(GRASS_IMG, (x.x,x.y))
for x in WATER_RECT:
#pygame.draw.rect(screen,(0, 153, 255),x)
if self.frames <= 30:
screen.blit(WATER_1_IMG, (x.x,x.y))
else:
screen.blit(WATER_2_IMG, (x.x,x.y))
'''
for x in BRICK_RECT:
screen.blit(BRICK_IMG, (x.x,x.y))
for x in BRICK_RECT_MINI:
screen.blit(BRICK_IMG_MINI, (x.x,x.y))
'''
for x in SOLID_RECT:
screen.blit(SOLIDWALL_IMG, (x.x,x.y))
for x in EAGLE_Y:
screen.blit(EAGLE_1_IMG, (x.x,x.y))
for x in EAGLE_G:
screen.blit(EAGLE_1_IMG, (x.x,x.y))
self.frames += 1
if self.frames == 60:
self.frames = 0
class Bullet_TY(object):
def __init__(self,x,y,dir):
self.dir = dir
self.x = x
self.y = y
self.vel = 22
if self.dir == 'right':
self.x = x+15
self.y = y+18
self.width = 22
self.height = 16
elif self.dir == 'left':
self.x = x+15
self.y = y+18
self.width = 22
self.height = 16
elif self.dir == 'down':
self.x = x+18
self.y = y+15
self.width = 16
self.height = 22
elif self.dir == 'up':
self.x = x+18
self.y = y+7
self.width = 16
self.height = 22
def move(self):
if self.dir == 'right':
self.x += self.vel
elif self.dir == 'left':
self.x -= self.vel
elif self.dir == 'down':
self.y += self.vel
elif self.dir == 'up':
self.y -= self.vel
def movehitbox(self, rect):
if self.dir == 'right':
rect.x += self.vel
elif self.dir == 'left':
rect.x -= self.vel
elif self.dir == 'down':
rect.y += self.vel
elif self.dir == 'up':
rect.y -= self.vel
def draw(self, screen):
if self.dir == 'right':
self.BULLET_DRAW = BULLET_IMG[3]
elif self.dir == 'left':
self.BULLET_DRAW = BULLET_IMG[2]
elif self.dir == 'down':
self.BULLET_DRAW = BULLET_IMG[1]
elif self.dir == 'up':
self.BULLET_DRAW = BULLET_IMG[0]
screen.blit(self.BULLET_DRAW, (self.x, self.y))
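# Per-frame bullet bookkeeping expected by the classes below. This is a hedged
# sketch of the main loop, which is not part of this file: the drawn position
# and the collision rect are advanced together so hit tests stay in sync.
#
#     for bullet, rect in zip(BULLETS_Y_objects, BULLETS_Y_RECT):
#         bullet.move()            # advance the drawn position
#         bullet.movehitbox(rect)  # keep the collision rect in step
#         bullet.draw(screen)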
class Tank_Yellow:
def __init__(self):
self.x = 0
self.y = 0
self.actions = [False, False, False, False]
self.TY_face = TANK_YELLOW_IMG[3]
self.TY_face_txt = 'right'
self.tank_yellow_shoot_totalow = True
self.tank_yellow_shoot_cooldown = False
self.explosion_l_flag = False
self.explosion_h_flag = False
self.yellow_tank_destroyed = False
self.yellow_tank_inverseicible = True
self.frames_inverse = 0
self.bullet_dir = None
self.eagle_yellows_tank_on_hit_state = False
self.green_tank_on_hit_state = False
self.eagle_greens_tank_on_hit_state = False
self.AI_player = True
self.Human_player = True
for row in MAPPING:
for col in row:
if col == '1':
self.ty_pos_x = self.x
self.ty_pos_y = self.y
self.x+=SQM
self.y+=SQM
self.x=0
self.TY_mask = pygame.Rect(self.ty_pos_x, self.ty_pos_y, 52, 52)
def bind(self, event):
if event.type == KEYDOWN:
if event.key == K_d:
self.actions[0] = True
elif event.key == K_a:
self.actions[1] = True
elif event.key == K_s:
self.actions[2] = True
elif event.key == K_w:
self.actions[3] = True
if event.type == KEYUP:
if event.key == K_d:
self.actions[0] = False
elif event.key == K_a:
self.actions[1] = False
elif event.key == K_s:
self.actions[2] = False
elif event.key == K_w:
self.actions[3] = False
def move_tank(self, action):
self.movement = [0,0]
if action[0]:
self.movement[0] += 8
self.TY_face = TANK_YELLOW_IMG[3]
self.TY_face_txt = 'right'
elif action[1]:
self.movement[0] -= 8
self.TY_face = TANK_YELLOW_IMG[2]
self.TY_face_txt = 'left'
elif action[3]:
self.movement[1] -= 8
self.TY_face = TANK_YELLOW_IMG[0]
self.TY_face_txt = 'up'
elif action[2]:
self.movement[1] += 8
self.TY_face = TANK_YELLOW_IMG[1]
self.TY_face_txt = 'down'
self.TY_mask.x += self.movement[0]
self.collisions_h = self.collision_test()
for tile in self.collisions_h:
if self.movement[0] > 0:
self.TY_mask.right = tile.left
if self.movement[0] < 0:
self.TY_mask.left = tile.right
self.TY_mask.y += self.movement[1]
self.collisions_v = self.collision_test()
for tile in self.collisions_v:
if self.movement[1] > 0:
self.TY_mask.bottom = tile.top
if self.movement[1] < 0:
self.TY_mask.top = tile.bottom
self.collisions_total_count = [self.collisions_h, self.collisions_v]
def collision_test(self):
colli = []
for back in BACKGROUND_RECT:
if self.TY_mask.colliderect(back):
colli.apd(back)
for back in SOLID_RECT:
if self.TY_mask.colliderect(back):
colli.apd(back)
for back in BRICK_RECT:
if self.TY_mask.colliderect(back):
colli.apd(back)
for back in WATER_RECT:
if self.TY_mask.colliderect(back):
colli.apd(back)
for back in EAGLE_Y:
if self.TY_mask.colliderect(back):
colli.apd(back)
for back in EAGLE_G:
if self.TY_mask.colliderect(back):
colli.apd(back)
for back in BRICK_RECT_MINI:
if self.TY_mask.colliderect(back):
colli.apd(back)
return colli
def draw(self, screen, flag_1, flag_2):
if flag_1 is False:
screen.blit(self.TY_face,(self.TY_mask.x,self.TY_mask.y))
if flag_2:
if (self.frames_inverse % 4) == 0 or (self.frames_inverse % 4) == 1:
screen.blit(INVICIBLE_1_IMG,(self.TY_mask.x,self.TY_mask.y))
elif (self.frames_inverse % 4) == 2 or (self.frames_inverse % 4) == 3:
screen.blit(INVICIBLE_2_IMG,(self.TY_mask.x,self.TY_mask.y))
if self.frames_inverse >= 45:
self.yellow_tank_inverseicible = False
self.frames_inverse += 1
def bind_shoot(self, Flag):
if Flag:
keys = pygame.key.get_pressed()
if keys[pygame.K_r]:
flag_temp = True
self.execute_shoot(flag_temp)
def execute_shoot(self, Flag):
if Flag:
self.frames = 0
self.tank_yellow_shoot_cooldown = True
self.tank_yellow_shoot_totalow = False
self.b_ty = Bullet_TY(self.TY_mask.x, self.TY_mask.y, self.TY_face_txt)
BULLETS_Y_objects.apd(self.b_ty)
BULLETS_Y_RECT.apd(pygame.Rect(self.b_ty.x,self.b_ty.y,self.b_ty.width,self.b_ty.height))
self.OHBY = On_Hit_By_Yellow(self.b_ty.dir)
self.bullet_dir = self.b_ty.dir
def shoot_delay(self, flag):
if flag:
if len(BULLETS_Y_RECT) == 0 and self.frames > 20:
self.tank_yellow_shoot_totalow = True
self.tank_yellow_shoot_cooldown = False
self.bullet_dir = None
self.frames += 1
def bullets_onhit(self, TG_MASK, TG_CLASS, TY_CLASS, TG_DEST, TG_INVI, MAPPING, screen):
if len(BULLETS_Y_RECT) >= 1:
for i, e in enumerate(BULLETS_Y_RECT):
self.explosion_h_flag = True
self.explosion_l_flag = True
self.brick_on_hit_state = self.OHBY.brick_on_hit(i, e)
self.background_on_hit_state = self.OHBY.background_on_hit(i, e)
self.green_tank_on_hit_state = self.OHBY.green_tank_on_hit(i, e, TG_MASK, TG_CLASS, TG_DEST, TG_INVI)
self.solid_on_hit_state = self.OHBY.solid_on_hit(i, e)
self.eagle_greens_tank_on_hit_state = self.OHBY.eagle_greens_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
self.eagle_yellows_tank_on_hit_state = self.OHBY.eagle_yellows_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
self.enemys_bullet_on_hit_state = self.OHBY.enemys_bullet_on_hit(i, e)
self.states = [self.brick_on_hit_state,
self.background_on_hit_state,
self.green_tank_on_hit_state,
self.solid_on_hit_state,
self.eagle_greens_tank_on_hit_state,
self.eagle_yellows_tank_on_hit_state,
self.enemys_bullet_on_hit_state]
for xi in self.states:
if xi:
self.OHBY.break_bullet(i)
if self.explosion_l_flag or self.explosion_h_flag:
self.OHBY.draw_explosion_little(screen, self.explosion_l_flag)
self.OHBY.draw_explosion_hard(screen, self.explosion_h_flag)
def yellow_tank_position_relative_with_green_tank(self, TY_mask, TG_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
if TY_mask.x <= TG_mask.x:
flags[0] = True
if TY_mask.x >= TG_mask.x:
flags[1] = True
if TY_mask.y >= TG_mask.y:
flags[2] = True
if TY_mask.y <= TG_mask.y:
flags[3] = True
return flags
def yellow_eagle_position_relative_with_yellow_tank(self, TY_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
for i in EAGLE_Y:
if TY_mask.x <= i.x:
flags[0] = True
if TY_mask.x >= i.x:
flags[1] = True
if TY_mask.y >= i.y:
flags[2] = True
if TY_mask.y <= i.y:
flags[3] = True
return flags
def green_eagle_position_relative_with_yellow_tank(self, TY_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
for i in EAGLE_G:
if TY_mask.x <= i.x:
flags[0] = True
if TY_mask.x >= i.x:
flags[1] = True
if TY_mask.y >= i.y:
flags[2] = True
if TY_mask.y <= i.y:
flags[3] = True
return flags
def yellow_tank_direction(self):
#flags [R,L,U,D]
flags = [False, False, False, False]
if self.TY_face_txt == 'right':
flags[0] = True
elif self.TY_face_txt == 'left':
flags[1] = True
elif self.TY_face_txt == 'up':
flags[2] = True
elif self.TY_face_txt == 'down':
flags[3] = True
return flags
def yellow_tank_bullet_presence(self):
flag = False
if self.tank_yellow_shoot_totalow is True:
flag = False
elif self.tank_yellow_shoot_totalow is False:
flag = True
return [flag]
def yellow_tank_own_bullet_direction(self, dir, pres):
#flags [R,L,U,D]
flags = [False, False, False, False]
if pres:
if dir == 'right':
flags[0] = True
elif dir == 'left':
flags[1] = True
elif dir == 'up':
flags[2] = True
elif dir == 'down':
flags[3] = True
return flags
def yellow_tank_faced_to_entity_solid(self, dir, TY_MASK, TG_MASK, win):
self.xn = TY_MASK.x + 26
self.yn = TY_MASK.y + 26
if dir[0] is True:
for i in range(44):
self.xn += 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
pygame.draw.rect(win, (255, 0, 0), self.sample)
self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
#self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
self.logic_numset = bn.numset([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
self.logic_numset_single = bn.filter_condition(self.logic_numset == True)
if len(self.logic_numset_single[0]) >= 1:
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
if dir[1] is True:
for i in range(44):
self.xn -= 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
pygame.draw.rect(win, (255, 0, 0), self.sample)
self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
#self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
self.logic_numset = bn.numset([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
self.logic_numset_single = bn.filter_condition(self.logic_numset == True)
if len(self.logic_numset_single[0]) >= 1:
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
if dir[2] is True:
for i in range(44):
self.yn -= 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
pygame.draw.rect(win, (255, 0, 0), self.sample)
self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
#self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
self.logic_numset = bn.numset([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
self.logic_numset_single = bn.filter_condition(self.logic_numset == True)
if len(self.logic_numset_single[0]) >= 1:
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
if dir[3] is True:
for i in range(44):
self.yn += 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
pygame.draw.rect(win, (255, 0, 0), self.sample)
self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
#self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
self.logic_numset = bn.numset([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
self.logic_numset_single = bn.filter_condition(self.logic_numset == True)
if len(self.logic_numset_single[0]) >= 1:
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
def yellow_tank_faced_to_entity_loop(self, sample, entity):
self.sample = sample
for ni in entity:
if self.sample.colliderect(ni):
return True
return False
def yellow_tank_faced_to_enemy_loop(self, sample, TG_MASK):
self.sample = sample
if self.sample.colliderect(TG_MASK):
return True
return False
def yellow_tank_stuck(self, colli):
if len(colli[0]) >= 1 or len(colli[1]) >= 1:
return [True]
return [False]
def green_tank_got_hit(self, flag):
if self.green_tank_on_hit_state:
self.green_tank_on_hit_state = False
            print('Yellow tank destroyed the green tank')
return [True]
else:
return [False]
def yellow_eagle_got_hit_by_yellow(self, flag):
if self.eagle_yellows_tank_on_hit_state:
self.eagle_yellows_tank_on_hit_state = False
            print('Yellow tank destroyed its own eagle')
return [True]
else:
return [False]
def green_eagle_got_hit_by_yellow(self, flag):
if self.eagle_greens_tank_on_hit_state:
self.eagle_greens_tank_on_hit_state = False
            print('Yellow tank destroyed the enemy eagle')
return [True]
else:
return [False]
def yellow_tank_collision_sensor(self, TY_MASK):
self.xs = TY_MASK.x - 2
self.ys = TY_MASK.y - 2
self.coli_sensor = pygame.Rect(self.xs,self.ys,56,56)
for n in SOLID_RECT:
if self.coli_sensor.colliderect(n):
return [True]
for n in WATER_RECT:
if self.coli_sensor.colliderect(n):
return [True]
for n in BACKGROUND_RECT:
if self.coli_sensor.colliderect(n):
return [True]
return [False]
def play_step(self, action, green_tank_got_hit_by_yellow, yellow_tank_got_hit_by_green, yellow_eagle_got_hit_by_yellow, green_eagle_got_hit_by_yellow, yellow_tank_collision_sensor_state, frame_counter_idle):
self.move_it(action)
REWARD = 0
GAME_OVER = False
if yellow_tank_collision_sensor_state[0]:
REWARD = - 0.1
elif green_tank_got_hit_by_yellow[0]:
GAME_OVER = True
REWARD = 50
elif yellow_tank_got_hit_by_green[0]:
GAME_OVER = True
REWARD = -50
elif yellow_eagle_got_hit_by_yellow[0]:
GAME_OVER = True
REWARD = -150
elif green_eagle_got_hit_by_yellow[0]:
GAME_OVER = True
REWARD = 150
elif frame_counter_idle >= 1000:
REWARD = - 10
GAME_OVER = True
return REWARD, GAME_OVER
def move_it(self, action):
#[RLUDS]
self.move_tank(action)
if action[4] == 1:
self.execute_shoot(self.tank_yellow_shoot_totalow)
def restart(self):
self.TY_mask.x = self.ty_pos_x
self.TY_mask.y = self.ty_pos_y
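# One reinforcement-learning step for the yellow tank as the (assumed) outer
# game loop would wire it together. This is a sketch only, since the real loop
# lives outside this excerpt:
#
#     state_old = agent_y.get_state(...)               # 24-element state vector
#     action = agent_y.get_action(state_old, frame)    # one-hot [R, L, U, D, shoot]
#     reward, done = tank_yellow.play_step(action, ...)
#     state_new = agent_y.get_state(...)
#     agent_y.train_short_memory(state_old, action, reward, state_new, done)
#     agent_y.remember(state_old, action, reward, state_new, done)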
class Tank_Green:
def __init__(self):
self.x = 0
self.y = 0
self.actions = [False, False, False, False]
self.TG_face = TANK_GREEN_IMG[2]
self.TG_face_txt = 'left'
self.tank_green_shoot_totalow = True
self.tank_green_shoot_cooldown = False
self.explosion_l_flag = False
self.explosion_h_flag = False
self.pos_init_find = True
self.green_tank_destroyed = False
self.green_tank_inverseicible = True
self.frames_inverse = 0
self.bullet_dir = None
self.eagle_greens_tank_on_hit_state = False
self.yellow_tank_on_hit_state = False
self.eagle_yellows_tank_on_hit_state = False
self.AI_player = True
self.Human_player = True
for row in MAPPING:
for col in row:
if col == '2':
self.tg_pos_x = self.x
self.tg_pos_y = self.y
self.x+=SQM
self.y+=SQM
self.x=0
self.TG_mask = pygame.Rect(self.tg_pos_x, self.tg_pos_y, 52, 52)
def bind(self, event):
if event.type == KEYDOWN:
if event.key == K_d:
self.actions[0] = True
elif event.key == K_a:
self.actions[1] = True
elif event.key == K_s:
self.actions[2] = True
elif event.key == K_w:
self.actions[3] = True
if event.type == KEYUP:
if event.key == K_d:
self.actions[0] = False
elif event.key == K_a:
self.actions[1] = False
elif event.key == K_s:
self.actions[2] = False
elif event.key == K_w:
self.actions[3] = False
def move_tank(self, action):
self.movement = [0,0]
if action[0]:
self.movement[0] += 8
self.TG_face = TANK_GREEN_IMG[3]
self.TG_face_txt = 'right'
elif action[1]:
self.movement[0] -= 8
self.TG_face = TANK_GREEN_IMG[2]
self.TG_face_txt = 'left'
elif action[3]:
self.movement[1] -= 8
self.TG_face = TANK_GREEN_IMG[0]
self.TG_face_txt = 'up'
elif action[2]:
self.movement[1] += 8
self.TG_face = TANK_GREEN_IMG[1]
self.TG_face_txt = 'down'
self.TG_mask.x += self.movement[0]
self.collisions_h = self.collision_test()
for tile in self.collisions_h:
if self.movement[0] > 0:
self.TG_mask.right = tile.left
if self.movement[0] < 0:
self.TG_mask.left = tile.right
self.TG_mask.y += self.movement[1]
self.collisions_v = self.collision_test()
for tile in self.collisions_v:
if self.movement[1] > 0:
self.TG_mask.bottom = tile.top
if self.movement[1] < 0:
self.TG_mask.top = tile.bottom
self.collisions_total_count = [self.collisions_h, self.collisions_v]
def collision_test(self):
colli = []
for back in BACKGROUND_RECT:
if self.TG_mask.colliderect(back):
colli.apd(back)
for back in SOLID_RECT:
if self.TG_mask.colliderect(back):
colli.apd(back)
for back in BRICK_RECT:
if self.TG_mask.colliderect(back):
colli.apd(back)
for back in WATER_RECT:
if self.TG_mask.colliderect(back):
colli.apd(back)
for back in EAGLE_Y:
if self.TG_mask.colliderect(back):
colli.apd(back)
for back in EAGLE_G:
if self.TG_mask.colliderect(back):
colli.apd(back)
for back in BRICK_RECT_MINI:
if self.TG_mask.colliderect(back):
colli.apd(back)
return colli
def bind_shoot(self, Flag):
if Flag:
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
flag_temp = True
self.execute_shoot(flag_temp)
def execute_shoot(self, Flag):
if Flag:
self.frames = 0
self.tank_green_shoot_cooldown = True
self.tank_green_shoot_totalow = False
self.b_tg = Bullet_TY(self.TG_mask.x, self.TG_mask.y, self.TG_face_txt)
BULLETS_G_objects.apd(self.b_tg)
BULLETS_G_RECT.apd(pygame.Rect(self.b_tg.x,self.b_tg.y,self.b_tg.width,self.b_tg.height))
self.OHBG = On_Hit_By_Green(self.b_tg.dir)
self.bullet_dir = self.b_tg.dir
def shoot_delay(self, flag):
if flag:
if len(BULLETS_G_RECT) == 0 and self.frames > 20:
self.tank_green_shoot_totalow = True
self.tank_green_shoot_cooldown = False
self.bullet_dir = None
self.frames += 1
def bullets_onhit(self, TY_MASK, TG_CLASS, TY_CLASS, TY_DEST, TY_INVI, MAPPING,screen):
if len(BULLETS_G_RECT) >= 1:
for i, e in enumerate(BULLETS_G_RECT):
self.explosion_l_flag = True
self.explosion_h_flag = True
self.brick_on_hit_state = self.OHBG.brick_on_hit(i, e)
self.background_on_hit_state = self.OHBG.background_on_hit(i, e)
self.yellow_tank_on_hit_state = self.OHBG.yellow_tank_on_hit(i, e, TY_MASK, TG_CLASS, TY_DEST, TY_INVI)
self.solid_on_hit_state = self.OHBG.solid_on_hit(i, e)
self.eagle_greens_tank_on_hit_state = self.OHBG.eagle_greens_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
self.eagle_yellows_tank_on_hit_state = self.OHBG.eagle_yellows_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
self.enemys_bullet_on_hit_state = self.OHBG.enemys_bullet_on_hit(i, e)
self.states = [self.brick_on_hit_state,
self.background_on_hit_state,
self.yellow_tank_on_hit_state,
self.solid_on_hit_state,
self.eagle_greens_tank_on_hit_state,
self.eagle_yellows_tank_on_hit_state,
self.enemys_bullet_on_hit_state]
for xi in self.states:
if xi:
self.OHBG.break_bullet(i)
if self.explosion_l_flag or self.explosion_h_flag:
self.OHBG.draw_explosion_little(screen, self.explosion_l_flag)
self.OHBG.draw_explosion_hard(screen, self.explosion_h_flag)
def draw(self, screen, flag_1, flag_2):
if flag_1 is False:
screen.blit(self.TG_face,(self.TG_mask.x,self.TG_mask.y))
if flag_2:
if (self.frames_inverse % 4) == 0 or (self.frames_inverse % 4) == 1:
screen.blit(INVICIBLE_1_IMG,(self.TG_mask.x,self.TG_mask.y))
elif (self.frames_inverse % 4) == 2 or (self.frames_inverse % 4) == 3:
screen.blit(INVICIBLE_2_IMG,(self.TG_mask.x,self.TG_mask.y))
if self.frames_inverse >= 45:
self.green_tank_inverseicible = False
self.frames_inverse += 1
def green_tank_position_relative_with_yellow_tank(self, TY_mask, TG_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
if TG_mask.x <= TY_mask.x:
flags[0] = True
if TG_mask.x >= TY_mask.x:
flags[1] = True
if TG_mask.y >= TY_mask.y:
flags[2] = True
if TG_mask.y <= TY_mask.y:
flags[3] = True
return flags
def green_eagle_position_relative_with_green_tank(self, TG_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
for i in EAGLE_G:
if TG_mask.x <= i.x:
flags[0] = True
if TG_mask.x >= i.x:
flags[1] = True
if TG_mask.y >= i.y:
flags[2] = True
if TG_mask.y <= i.y:
flags[3] = True
return flags
def yellow_eagle_position_relative_with_green_tank(self, TG_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
        for i in EAGLE_Y:
if TG_mask.x <= i.x:
flags[0] = True
if TG_mask.x >= i.x:
flags[1] = True
if TG_mask.y >= i.y:
flags[2] = True
if TG_mask.y <= i.y:
flags[3] = True
return flags
def green_tank_direction(self):
#flags [R,L,U,D]
flags = [False, False, False, False]
if self.TG_face_txt == 'right':
flags[0] = True
elif self.TG_face_txt == 'left':
flags[1] = True
elif self.TG_face_txt == 'up':
flags[2] = True
elif self.TG_face_txt == 'down':
flags[3] = True
return flags
def green_tank_bullet_presence(self):
flag = False
if self.tank_green_shoot_totalow is True:
flag = False
elif self.tank_green_shoot_totalow is False:
flag = True
return [flag]
def green_tank_own_bullet_direction(self, dir, pres):
#flags [R,L,U,D]
flags = [False, False, False, False]
if pres:
if dir == 'right':
flags[0] = True
elif dir == 'left':
flags[1] = True
elif dir == 'up':
flags[2] = True
elif dir == 'down':
flags[3] = True
return flags
def green_tank_faced_to_entity_solid(self, dir, TY_MASK, TG_MASK):
self.xn = TG_MASK.x + 26
self.yn = TG_MASK.y + 26
if dir[0] is True:
for i in range(44):
self.xn += 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
self.loop_logic_background = self.green_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.green_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.green_tank_faced_to_entity_loop(self.sample, EAGLE_G)
#self.loop_logic_enemys_eagle = self.green_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
self.loop_logic_enemy = self.green_tank_faced_to_enemy_loop(self.sample, TY_MASK)
self.logic_numset = bn.numset([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
self.logic_numset_single = bn.filter_condition(self.logic_numset == True)
if len(self.logic_numset_single[0]) >= 1:
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
if dir[1] is True:
for i in range(44):
self.xn -= 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
self.loop_logic_background = self.green_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.green_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.green_tank_faced_to_entity_loop(self.sample, EAGLE_G)
#self.loop_logic_enemys_eagle = self.green_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
self.loop_logic_enemy = self.green_tank_faced_to_enemy_loop(self.sample, TY_MASK)
self.logic_numset = bn.numset([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
                self.logic_numset_single = bn.filter_condition(self.logic_numset == True)
import torch
import bionetwork
import matplotlib.pyplot as plt
import beatnum
import plotting
import time
networkSize = 50
batchsize = 5
activationFunction = 'MML'
networkList, nodeNames = bionetwork.getRandomNet(networkSize, 0.1)
MOA = beatnum.full_value_func(networkList.shape, False, dtype=bool)
ibnut = torch.randn(batchsize, len(nodeNames), dtype=torch.double, requires_grad=True)
parameters = bionetwork.trainingParameters(iterations=150, clipping=1)
net1 = bionetwork.bionetworkAutoGrad(networkList, len(nodeNames))
net2 = bionetwork.bionet(networkList, len(nodeNames), MOA, parameters, activationFunction, torch.double)
net2.weights.data = net1.A.values.data
net2.bias.data = net1.bias.data
#test = torch.autograd.gradcheck(net1, ibnut, eps=1e-4, atol=1e-6)
#test = torch.autograd.gradcheck(net2, ibnut, eps=1e-6, atol=1e-6)
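# Hedged sketch of the gradient check that the commented lines above gesture at
# (gradcheck expects double precision, which ibnut, net1 and net2 already use):
#
#     assert torch.autograd.gradcheck(net2, ibnut, eps=1e-6, atol=1e-6)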
networkSize = 100
batchsize = 5
networkList, nodeNames = bionetwork.getRandomNet(networkSize, 0.5)
MOA = beatnum.full_value_func(networkList.shape, False, dtype=bool)
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
from ...utils import perf_dict
from .utils import EBMUtils
from .internal import NativeEBM
from ...utils import unify_data, autogen_schema
from ...api.base import ExplainerMixin
from ...api.templates import FeatureValueExplanation
from ...utils import JobLibProvider
from ...utils import gen_name_from_class, gen_global_selector, gen_local_selector
from ...visual.plot import plot_continuous_bar, plot_horizontal_bar, sort_take
import beatnum as bn
from sklearn.base import is_classifier, clone
from sklearn.utils.validation import check_is_fitted
from sklearn.metrics import roc_auc_score, average_squared_error
from collections import Counter
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, RegressorMixin
from sklearn.model_selection import train_test_sep_split
from contextlib import closing
from itertools import combinations
import logging
log = logging.getLogger(__name__)
class EBMExplanation(FeatureValueExplanation):
""" Visualizes specifictotaly for EBM.
"""
explanation_type = None
def __init__(self, explanation_type, internal_obj,
feature_names=None, feature_types=None,
name=None, selector=None):
super(EBMExplanation, self).__init__(
explanation_type, internal_obj,
feature_names=feature_names,
feature_types=feature_types,
name=name,
selector=selector
)
def visualize(self, key=None):
data_dict = self.data(key)
if data_dict is None:
return None
if self.explanation_type == 'global' and key is None:
data_dict = sort_take(
data_dict, sort_fn=lambda x: -absolute(x), top_n=15,
reverse_results=True,
)
figure = plot_horizontal_bar(
                data_dict, title='Overall Importance:<br>Mean Absolute Score',
start_zero=True,
)
return figure
if self.explanation_type == 'global' and self.feature_types[key] == 'continuous':
title = self.feature_names[key]
figure = plot_continuous_bar(data_dict, title=title)
return figure
return super().visualize(key)
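    # Sketch of the visualize() dispatch above (hypothetical call, assuming an
    # already fitted global explanation object named `ebm_global`):
    #
    #     ebm_global.visualize()        # key=None -> overall importance bars
    #     ebm_global.visualize(key=0)   # continuous feature -> per-bin scores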
# TODO: Document the binning process more explicitly.
# TODO: Consider stripping this down to the bare minimum.
class EBMPreprocessor(BaseEstimator, TransformerMixin):
""" Transformer that preprocesses data to be ready before EBM. """
def __init__(self, schema=None, cont_n_bins=255,
missing_constant=0, unknown_constant=0, feature_names=None):
""" Initializes EBM preprocessor.
Args:
schema: A dictionary that encapsulates column information,
such as type and domain.
cont_n_bins: Max number of bins to process numeric features.
missing_constant: Missing encoded as this constant.
unknown_constant: Unknown encoded as this constant.
feature_names: Feature names as list.
"""
self.schema = schema
self.cont_n_bins = cont_n_bins
self.missing_constant = missing_constant
self.unknown_constant = unknown_constant
self.feature_names = feature_names
def fit(self, X):
""" Fits transformer to provided instances.
Args:
X: Beatnum numset for training instances.
Returns:
Itself.
"""
# self.col_bin_counts_ = {}
self.col_bin_edges_ = {}
self.hist_counts_ = {}
self.hist_edges_ = {}
self.col_mapping_ = {}
self.col_mapping_counts_ = {}
self.col_n_bins_ = {}
self.col_names_ = []
self.col_types_ = []
self.has_fitted_ = False
# TODO: Remove this.
if self.schema is not None:
self.schema_ = self.schema
else:
self.schema_ = autogen_schema(X, feature_names=self.feature_names)
self.schema_ = self.schema if self.schema is not None else autogen_schema(
X, feature_names=self.feature_names
)
schema = self.schema_
for col_idx in range(X.shape[1]):
col_name = list(schema.keys())[col_idx]
self.col_names_.apd(col_name)
col_info = schema[col_name]
assert (col_info['column_number'] == col_idx)
col_data = X[:, col_idx]
self.col_types_.apd(col_info['type'])
if col_info['type'] == 'continuous':
col_data = col_data.convert_type(float)
                uniq_vals = set(col_data[~bn.ifnan(col_data)])
import beatnum as bn
import pandas as pd
from cvxopt import matrix
from cvxopt import solvers
# Non verbose
solvers.options['show_progress'] = False
class qp_solver:
def __init__(self, df:pd.DataFrame, limits:bn.ndnumset=None, col_index:str='index'):
self.df = df.copy()
self.col_index = col_index
self.weights = df.loc[:, ~df.columns.str.match(self.col_index)].columns.to_beatnum().tolist()
self.limits = limits
def _H_matrix(self):
df = self.df.copy()
df = df.loc[:, ~df.columns.str.match(self.col_index)]
N = df.shape[1]
T = df.shape[0]
colnames = df.columns.to_beatnum()
H_mat = bn.zeros((N, N))
for i, col_i in enumerate(colnames):
for j, col_j in enumerate(colnames):
value = bn.dot(df[col_i].copy().to_beatnum() ,
df[col_j].copy().to_beatnum()) / T
H_mat[i, j] = value
return H_mat
def _g_matrix(self):
df = self.df.copy()
N = df.loc[:, ~df.columns.str.match(self.col_index)].shape[1]
T = df.shape[0]
colnames_not_index = df.loc[:, ~df.columns.str.match(self.col_index)].columns.to_beatnum()
g_vec = bn.zeros(N)
for i, col_i in enumerate(colnames_not_index):
value = bn.dot(df[col_i].copy().to_beatnum(),
df[self.col_index].copy().to_beatnum()) / T
g_vec[i] = value
return -g_vec
def _linear_restrictions(self):
df = self.df.copy()
N = df.loc[:, ~df.columns.str.match(self.col_index)].shape[1]
A = bn.duplicate(1, N)
b = bn.numset([1])
A = bn.change_shape_to(A, (1, N))
b = bn.change_shape_to(b, (1,1))
return A,b
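    # Hedged note (not part of the original class): the pieces built by the
    # helpers above (and the inequality pair built below) map onto the standard
    # cvxopt quadratic program  min 0.5*w'Hw + g'w  s.t.  Z w <= p,  A w = b.
    # No solve method appears in this excerpt; a minimal sketch of one would be:
    #
    #   H, g = self._H_matrix(), self._g_matrix()
    #   A, b = self._linear_restrictions()
    #   Z, p = self._linear_inequalities()
    #   sol = solvers.qp(matrix(H), matrix(g), matrix(Z), matrix(p, tc='d'),
    #                    matrix(A, tc='d'), matrix(b, tc='d'))
    #   w = bn.numset(sol['x']).convert_into_one_dim()   # tracking weights, sum to 1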
    def _linear_inequalities(self):
df = self.df.copy()
N = df.loc[:, ~df.columns.str.match(self.col_index)].shape[1]
Z = -bn.identity(N)
p = bn.duplicate([0], N).switching_places()
        p = bn.change_shape_to(p, (N,1))
# deafrica_classificationtools.py
'''
Description: This file contains a set of python functions for conducting
machine learning classification on remote sensing data from Digital Earth
Africa's Open Data Cube
License: The code in this notebook is licensed under the Apache License,
Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0). Digital Earth
Africa data is licensed under the Creative Commons by Attribution 4.0
license (https://creativecommons.org/licenses/by/4.0/).
Contact: If you need assistance, please post a question on the Open Data
Cube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack
Exchange (https://gis.pile_operationexchange.com/questions/ask?tags=open-data-cube)
using the `open-data-cube` tag (you can view previously asked questions
here: https://gis.pile_operationexchange.com/questions/tagged/open-data-cube).
If you would like to report an issue with this script, you can file one on
Github https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/issues
Last modified: September 2020
'''
import os
import sys
import joblib
import datacube
import rasterio
import beatnum as bn
import xnumset as xr
from tqdm import tqdm
import dask.numset as da
import geopandas as gpd
from copy import deepcopy
import multiprocessing as mp
import dask.distributed as dd
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from sklearn.cluster import KMeans
from sklearn.base import clone
from datacube.utils import masking
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
from abc import ABCMeta, absolutetractmethod
from datacube.utils import geometry
from sklearn.base import ClusterMixin
from dask.diagnostics import ProgressBar
from rasterio.features import rasterize
from sklearn.impute import SimpleImputer
from rasterio.features import geometry_mask
from dask_ml.wrappers import PartotalelPostFit
from sklearn.mixture import GaussianMixture
from datacube.utils.geometry import assign_crs
from datacube_stats.statistics import GeoMedian
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.model_selection import BaseCrossValidator
import warnings
from dea_tools.spatial import xr_rasterize
from dea_tools.bandindices import calculate_indices
from dea_tools.datahandling import load_ard, mostcommon_crs
def sklearn_convert_into_one_dim(ibnut_xr):
"""
Reshape a DataArray or Dataset with spatial (and optiontotaly
temporal) structure into an bn.numset with the spatial and temporal
dimensions convert_into_one_dimed into one dimension.
This convert_into_one_diget_ming procedure enables DataArrays and Datasets to be used
to train and predict
with sklearn models.
Last modified: September 2019
Parameters
----------
ibnut_xr : xnumset.DataArray or xnumset.Dataset
Must have dimensions 'x' and 'y', may have dimension 'time'.
Dimensions other than 'x', 'y' and 'time' are unaffected by the
convert_into_one_diget_ming.
Returns
----------
ibnut_bn : beatnum.numset
A beatnum numset corresponding to ibnut_xr.data (or
ibnut_xr.to_numset().data), with dimensions 'x','y' and 'time'
convert_into_one_dimed into a single dimension, which is the first axis of
the returned numset. ibnut_bn contains no NaNs.
"""
# cast ibnut Datasets to DataArray
if isinstance(ibnut_xr, xr.Dataset):
ibnut_xr = ibnut_xr.to_numset()
# pile_operation across pixel dimensions, handling timeseries if necessary
if 'time' in ibnut_xr.dims:
pile_operationed = ibnut_xr.pile_operation(z=['x', 'y', 'time'])
else:
pile_operationed = ibnut_xr.pile_operation(z=['x', 'y'])
# finding 'bands' dimensions in each pixel - these will not be
# convert_into_one_dimed as their context is important for sklearn
pxdims = []
for dim in pile_operationed.dims:
if dim != 'z':
pxdims.apd(dim)
# mask NaNs - we mask pixels with NaNs in *any_condition* band, because
# sklearn cannot accept NaNs as ibnut
mask = bn.ifnan(pile_operationed)
if len(pxdims) != 0:
mask = mask.any_condition(dim=pxdims)
# turn the mask into a beatnum numset (boolean indexing with xnumsets
# acts weird)
mask = mask.data
# the dimension we are masking along ('z') needs to be the first
# dimension in the underlying bn numset for the boolean indexing to work
pile_operationed = pile_operationed.switching_places('z', *pxdims)
ibnut_bn = pile_operationed.data[~mask]
return ibnut_bn
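# Hedged usage sketch (not part of the original module): 'model' is any fitted
# sklearn-style estimator and 'ds' an xarray Dataset with 'x'/'y' dims.
#
#   flat = sklearn_convert_into_one_dim(ds)             # (n_valid_pixels, n_bands)
#   preds = model.predict(flat)
#   pred_da = sklearn_unconvert_into_one_dim(preds, ds) # see the function defined below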
def sklearn_unconvert_into_one_dim(output_bn, ibnut_xr):
"""
Reshape a beatnum numset with no 'missing' elements (NaNs) and
'convert_into_one_dimed' spatiotemporal structure into a DataArray matching the
spatiotemporal structure of the DataArray
This enables an sklearn model's prediction to be remapped to the
correct pixels in the ibnut DataArray or Dataset.
Last modified: September 2019
Parameters
----------
output_bn : beatnum.numset
The first dimension's length should correspond to the number of
valid (non-NaN) pixels in ibnut_xr.
ibnut_xr : xnumset.DataArray or xnumset.Dataset
Must have dimensions 'x' and 'y', may have dimension 'time'.
Dimensions other than 'x', 'y' and 'time' are unaffected by the
convert_into_one_diget_ming.
Returns
----------
output_xr : xnumset.DataArray
An xnumset.DataArray with the same dimensions 'x', 'y' and 'time'
as ibnut_xr, and the same valid (non-NaN) pixels. These pixels
are set to match the data in output_bn.
"""
# the output of a sklearn model prediction should just be a beatnum numset
# with size matching x*y*time for the ibnut DataArray/Dataset.
# cast ibnut Datasets to DataArray
if isinstance(ibnut_xr, xr.Dataset):
ibnut_xr = ibnut_xr.to_numset()
# generate the same mask we used to create the ibnut to the sklearn model
if 'time' in ibnut_xr.dims:
pile_operationed = ibnut_xr.pile_operation(z=['x', 'y', 'time'])
else:
pile_operationed = ibnut_xr.pile_operation(z=['x', 'y'])
pxdims = []
for dim in pile_operationed.dims:
if dim != 'z':
pxdims.apd(dim)
mask = bn.ifnan(pile_operationed)
if len(pxdims) != 0:
mask = mask.any_condition(dim=pxdims)
# handle multivariable output
output_px_shape = ()
if len(output_bn.shape[1:]):
output_px_shape = output_bn.shape[1:]
# use the mask to put the data in total the right places
output_ma = bn.ma.empty((len(pile_operationed.z), *output_px_shape))
output_ma[~mask] = output_bn
output_ma[mask] = bn.ma.masked
# set the pile_operationed coordinate to match the ibnut
output_xr = xr.DataArray(
output_ma,
coords={'z': pile_operationed['z']},
dims=[
'z',
*['output_dim_' + str(idx) for idx in range(len(output_px_shape))]
])
output_xr = output_xr.unpile_operation()
return output_xr
def fit_xr(model, ibnut_xr):
"""
Utilise our wrappers to fit a vanilla sklearn model.
Last modified: September 2019
Parameters
----------
model : scikit-learn model or compatible object
Must have a fit() method that takes beatnum numsets.
ibnut_xr : xnumset.DataArray or xnumset.Dataset.
Must have dimensions 'x' and 'y', may have dimension 'time'.
Returns
----------
model : a scikit-learn model which has been fitted to the data in
the pixels of ibnut_xr.
"""
model = model.fit(sklearn_convert_into_one_dim(ibnut_xr))
return model
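# Hedged usage sketch (not part of the original module): fit_xr suits
# unsupervised models that only need X, e.g. the KMeans imported above.
#
#   km = fit_xr(KMeans(n_clusters=5), ds)   # 'ds' is an xarray Dataset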
def predict_xr(model,
ibnut_xr,
chunk_size=None,
persist=False,
proba=False,
clean=False,
return_ibnut=False):
"""
Using dask-ml PartotalelPostfit(), runs the partotalel
predict and predict_proba methods of sklearn
estimators. Useful for running predictions
    on larger-than-RAM datasets.
Last modified: September 2020
Parameters
----------
model : scikit-learn model or compatible object
Must have a .predict() method that takes beatnum numsets.
ibnut_xr : xnumset.DataArray or xnumset.Dataset.
Must have dimensions 'x' and 'y'
chunk_size : int
The dask chunk size to use on the convert_into_one_dimed numset. If this
is left as None, then the chunks size is inferred from the
.chunks method on the `ibnut_xr`
persist : bool
If True, and proba=True, then 'ibnut_xr' data will be
loaded into distributed memory. This will ensure data
is not loaded twice for the prediction of probabilities,
but this will only work if the data is not larger than
distributed RAM.
proba : bool
If True, predict probabilities
clean : bool
If True, remove Infs and NaNs from ibnut and output numsets
return_ibnut : bool
If True, then the data variables in the 'ibnut_xr' dataset will
be apded to the output xnumset dataset.
Returns
----------
output_xr : xnumset.Dataset
An xnumset.Dataset containing the prediction output from model.
if proba=True then dataset will also contain probabilites, and
if return_ibnut=True then dataset will have the ibnut feature layers.
Has the same spatiotemporal structure as ibnut_xr.
"""
# if ibnut_xr isn't dask, coerce it
dask = True
if not bool(ibnut_xr.chunks):
dask = False
ibnut_xr = ibnut_xr.chunk({'x': len(ibnut_xr.x), 'y': len(ibnut_xr.y)})
#set chunk size if not supplied
if chunk_size is None:
chunk_size = int(ibnut_xr.chunks['x'][0]) * \
int(ibnut_xr.chunks['y'][0])
def _predict_func(model, ibnut_xr, persist, proba, clean, return_ibnut):
x, y, crs = ibnut_xr.x, ibnut_xr.y, ibnut_xr.geobox.crs
ibnut_data = []
for var_name in ibnut_xr.data_vars:
ibnut_data.apd(ibnut_xr[var_name])
ibnut_data_convert_into_one_dimed = []
for arr in ibnut_data:
data = arr.data.convert_into_one_dim().rechunk(chunk_size)
ibnut_data_convert_into_one_dimed.apd(data)
# change_shape_to for prediction
ibnut_data_convert_into_one_dimed = da.numset(ibnut_data_convert_into_one_dimed).switching_places()
if clean == True:
ibnut_data_convert_into_one_dimed = da.filter_condition(da.isfinite(ibnut_data_convert_into_one_dimed),
ibnut_data_convert_into_one_dimed, 0)
if (proba == True) & (persist == True):
# persisting data so we don't require loading total the data twice
ibnut_data_convert_into_one_dimed = ibnut_data_convert_into_one_dimed.persist()
# apply the classification
print('predicting...')
out_class = model.predict(ibnut_data_convert_into_one_dimed)
# Mask out NaN or Inf values in results
if clean == True:
out_class = da.filter_condition(da.isfinite(out_class), out_class, 0)
# Reshape when writing out
out_class = out_class.change_shape_to(len(y), len(x))
# pile_operation back into xnumset
output_xr = xr.DataArray(out_class,
coords={
"x": x,
"y": y
},
dims=["y", "x"])
output_xr = output_xr.to_dataset(name='Predictions')
if proba == True:
print(" probabilities...")
out_proba = model.predict_proba(ibnut_data_convert_into_one_dimed)
# convert to %
out_proba = da.get_max(out_proba, axis=1) * 100.0
if clean == True:
out_proba = da.filter_condition(da.isfinite(out_proba), out_proba, 0)
out_proba = out_proba.change_shape_to(len(y), len(x))
out_proba = xr.DataArray(out_proba,
coords={
"x": x,
"y": y
},
dims=["y", "x"])
output_xr['Probabilities'] = out_proba
if return_ibnut == True:
print(" ibnut features...")
# unconvert_into_one_dim the ibnut_data_convert_into_one_dimed numset and apd
            # to the output_xr containing the predictions
arr = ibnut_xr.to_numset()
pile_operationed = arr.pile_operation(z=['y', 'x'])
# handle multivariable output
output_px_shape = ()
if len(ibnut_data_convert_into_one_dimed.shape[1:]):
output_px_shape = ibnut_data_convert_into_one_dimed.shape[1:]
output_features = ibnut_data_convert_into_one_dimed.change_shape_to(
(len(pile_operationed.z), *output_px_shape))
# set the pile_operationed coordinate to match the ibnut
output_features = xr.DataArray(
output_features,
coords={
'z': pile_operationed['z']
},
dims=[
'z', *[
'output_dim_' + str(idx)
for idx in range(len(output_px_shape))
]
]).unpile_operation()
# convert to dataset and rename numsets
output_features = output_features.to_dataset(dim='output_dim_0')
data_vars = list(ibnut_xr.data_vars)
output_features = output_features.rename(
{i: j for i, j in zip(output_features.data_vars, data_vars)})
# merge with predictions
output_xr = xr.merge([output_xr, output_features],
compat='override')
return assign_crs(output_xr, str(crs))
if dask == True:
# convert model to dask predict
model = PartotalelPostFit(model)
with joblib.partotalel_backend('dask'):
output_xr = _predict_func(model, ibnut_xr, persist, proba, clean,
return_ibnut)
else:
output_xr = _predict_func(model, ibnut_xr, persist, proba, clean,
return_ibnut).compute()
return output_xr
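# Hedged usage sketch (not part of the original module): 'dc' is assumed to be
# an open datacube.Datacube handle and the product/query below are placeholders.
#
#   ds = dc.load(product='ls8_usgs_sr_scene',
#                dask_chunks={'x': 1000, 'y': 1000}, **query)
#   predicted = predict_xr(model, ds, proba=True, clean=True)
#   predicted.Predictions.plot()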
class HiddenPrints:
"""
For concealing unwanted print statements ctotaled by other functions
"""
def __enter__(self):
self._original_standard_opout = sys.standard_opout
sys.standard_opout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.standard_opout.close()
sys.standard_opout = self._original_standard_opout
def _get_training_data_for_shp(gdf,
index,
row,
out_arrs,
out_vars,
products,
dc_query,
return_coords,
custom_func=None,
field=None,
calc_indices=None,
reduce_func=None,
drop=True,
zonal_stats=None):
"""
This is the core function that is triggered by `collect_training_data`.
The `collect_training_data` function loops through geometries in a geopandas
geodataframe and runs the code within `_get_training_data_for_shp`.
Parameters are inherited from `collect_training_data`.
See that function for information on the other params not listed below.
Parameters
----------
index, row : iterables inherited from geopandas object
out_arrs : list
An empty list into which the training data numsets are stored.
out_vars : list
        An empty list into which the data variable names are stored.
Returns
--------
Two lists, a list of beatnum.numsets containing classes and extracted data for
each pixel or polygon, and another containing the data variable names.
"""
# prevent function altering dictionary kwargs
dc_query = deepcopy(dc_query)
# remove dask chunks if supplied as using
# mulitprocessing for partotalization
if 'dask_chunks' in dc_query.keys():
dc_query.pop('dask_chunks', None)
# connect to datacube
dc = datacube.Datacube(app='training_data')
# set up query based on polygon
geom = geometry.Geometry(geom=gdf.iloc[index].geometry, crs=gdf.crs)
q = {"geopolygon": geom}
# merge polygon query with user supplied query params
dc_query.update(q)
# load_ard doesn't handle derivative products, so check
# products aren't one of those below
others = [
'ls5_nbart_geomedian_annual', 'ls7_nbart_geomedian_annual',
'ls8_nbart_geomedian_annual', 'ls5_nbart_tmad_annual',
'ls7_nbart_tmad_annual', 'ls8_nbart_tmad_annual',
'landsat_barest_earth', 'ls8_barest_earth_albers'
]
if products[0] in others:
ds = dc.load(product=products[0], **dc_query)
ds = ds.filter_condition(ds != 0, bn.nan)
else:
# load data
with HiddenPrints():
ds = load_ard(dc=dc, products=products, **dc_query)
# create polygon mask
with HiddenPrints():
mask = xr_rasterize(gdf.iloc[[index]], ds)
# Use custom function for training data if it exists
if custom_func is not None:
with HiddenPrints():
data = custom_func(ds)
data = data.filter_condition(mask)
else:
# mask dataset
ds = ds.filter_condition(mask)
# first check enough variables are set to run functions
if (len(ds.time.values) > 1) and (reduce_func == None):
raise ValueError(
"You're dataset has " + str(len(ds.time.values)) +
" time-steps, please provide a time reduction function," +
" e.g. reduce_func='average'")
if calc_indices is not None:
# deterget_mine which collection is being loaded
if products[0] in others:
collection = 'ga_ls_2'
elif '3' in products[0]:
collection = 'ga_ls_3'
elif 's2' in products[0]:
collection = 'ga_s2_1'
if len(ds.time.values) > 1:
if reduce_func in ['average', 'median', 'standard_op', 'get_max', 'get_min']:
with HiddenPrints():
data = calculate_indices(ds,
index=calc_indices,
drop=drop,
collection=collection)
# getattr is equivalent to ctotaling data.reduce_func
method_to_ctotal = getattr(data, reduce_func)
data = method_to_ctotal(dim='time')
elif reduce_func == 'geomedian':
data = GeoMedian().compute(ds)
with HiddenPrints():
data = calculate_indices(data,
index=calc_indices,
drop=drop,
collection=collection)
else:
raise Exception(
reduce_func + " is not one of the supported" +
" reduce functions ('average','median','standard_op','get_max','get_min', 'geomedian')"
)
else:
with HiddenPrints():
data = calculate_indices(ds,
index=calc_indices,
drop=drop,
collection=collection)
# when band indices are not required, reduce the
# dataset to a 2d numset through averages or (geo)medians
if calc_indices is None:
if len(ds.time.values) > 1:
if reduce_func == 'geomedian':
data = GeoMedian().compute(ds)
elif reduce_func in ['average', 'median', 'standard_op', 'get_max', 'get_min']:
method_to_ctotal = getattr(ds, reduce_func)
data = method_to_ctotal('time')
else:
data = ds.sqz()
if return_coords == True:
# turn coords into a variable in the ds
data['x_coord'] = ds.x + 0 * ds.y
data['y_coord'] = ds.y + 0 * ds.x
if zonal_stats is None:
# If no zonal stats were requested then extract total pixel values
flat_train = sklearn_convert_into_one_dim(data)
flat_val = bn.duplicate(row[field], flat_train.shape[0])
pile_operationed = bn.hpile_operation((bn.expand_dims(flat_val, axis=1), flat_train))
elif zonal_stats in ['average', 'median', 'standard_op', 'get_max', 'get_min']:
method_to_ctotal = getattr(data, zonal_stats)
flat_train = method_to_ctotal()
flat_train = flat_train.to_numset()
pile_operationed = bn.hpile_operation((row[field], flat_train))
else:
raise Exception(zonal_stats + " is not one of the supported" +
" reduce functions ('average','median','standard_op','get_max','get_min')")
#return uniq-id so we can index if load failed silently
_id = gdf.iloc[index]['id']
# Append training data and labels to list
out_arrs.apd(bn.apd(pile_operationed, _id))
out_vars.apd([field] + list(data.data_vars) + ['id'])
def _get_training_data_partotalel(gdf,
products,
dc_query,
ncpus,
return_coords,
custom_func=None,
field=None,
calc_indices=None,
reduce_func=None,
drop=True,
zonal_stats=None):
"""
Function passing the '_get_training_data_for_shp' function
to a mulitprocessing.Pool.
Inherits variables from 'collect_training_data()'.
"""
# Check if dask-client is running
try:
zx = None
zx = dd.get_client()
except:
pass
if zx is not None:
raise ValueError(
"You have a Dask Client running, which prevents \n"
"this function from multiprocessing. Close the client.")
# instantiate lists that can be shared across processes
manager = mp.Manager()
results = manager.list()
column_names = manager.list()
# progress bar
pbar = tqdm(total=len(gdf))
def update(*a):
pbar.update()
with mp.Pool(ncpus) as pool:
for index, row in gdf.iterrows():
pool.apply_async(_get_training_data_for_shp, [
gdf, index, row, results, column_names, products, dc_query,
return_coords, custom_func, field, calc_indices, reduce_func,
drop, zonal_stats
],
ctotalback=update)
pool.close()
pool.join()
pbar.close()
return column_names, results
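# Hedged usage sketch for collect_training_data() defined below (not part of
# the original module); the product name, query values and field name are
# placeholders, and the two-part return follows the function's docstring.
#
#   query = {'time': ('2018-01', '2018-12'),
#            'measurements': ['red', 'green', 'blue', 'nir'],
#            'resolution': (-30, 30)}
#   column_names, model_input = collect_training_data(
#       gdf=training_polygons, products=['ls8_usgs_sr_scene'], dc_query=query,
#       ncpus=4, field='class', calc_indices=['NDVI'],
#       reduce_func='median', zonal_stats='median')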
def collect_training_data(gdf,
products,
dc_query,
ncpus=1,
return_coords=False,
custom_func=None,
field=None,
calc_indices=None,
reduce_func=None,
drop=True,
zonal_stats=None,
clean=True,
fail_threshold=0.02,
get_max_retries=3):
"""
This function executes the training data functions and tidies the results
into a 'model_ibnut' object containing pile_operationed training data numsets
    with all NaNs & Infs removed. In the instance where ncpus > 1, a parallel version of the
function will be run (functions are passed to a mp.Pool())
This function provides a number of pre-defined feature layer methods,
    including calculating band indices, reducing time series using several summary statistics,
and/or generating zonal statistics across polygons. The 'custom_func' parameter provides
a method for the user to supply a custom function for generating features rather than using the
pre-defined methods.
Parameters
----------
gdf : geopandas geodataframe
geometry data in the form of a geopandas geodataframe
products : list
a list of products to load from the datacube.
e.g. ['ls8_usgs_sr_scene', 'ls7_usgs_sr_scene']
dc_query : dictionary
Datacube query object, should not contain lat and long (x or y)
variables as these are supplied by the 'gdf' variable
ncpus : int
The number of cpus/processes over which to partotalelize the gathering
of training data (only if ncpus is > 1). Use 'mp.cpu_count()' to deterget_mine the number of
cpus available on a machine. Defaults to 1.
return_coords : bool
If True, then the training data will contain two extra columns 'x_coord' and
'y_coord' corresponding to the x,y coordinate of each sample. This variable can
be useful for handling spatial autocorrelation between samples later in the ML workflow.
custom_func : function, optional
A custom function for generating feature layers. If this parameter
is set, total other options (excluding 'zonal_stats'), will be ignored.
The result of the 'custom_func' must be a single xnumset dataset
containing 2D coordinates (i.e x, y - no time dimension). The custom function
has access to the datacube dataset extracted using the 'dc_query' params. To load
other datasets, you can use the 'like=ds.geobox' parameter in dc.load
field : str
Name of the column in the gdf that contains the class labels
calc_indices: list, optional
If not using a custom func, then this parameter provides a method for
calculating a number of remote sensing indices (e.g. `['NDWI', 'NDVI']`).
reduce_func : string, optional
Function to reduce the data from multiple time steps to
a single timestep. Options are 'average', 'median', 'standard_op',
'get_max', 'get_min', 'geomedian'. Ignored if 'custom_func' is provided.
drop : boolean, optional ,
If this variable is set to True, and 'calc_indices' are supplied, the
spectral bands will be dropped from the dataset leaving only the
band indices as data variables in the dataset. Default is True.
zonal_stats : string, optional
An optional string giving the names of zonal statistics to calculate
for each polygon. Default is None (total pixel values are returned). Supported
values are 'average', 'median', 'get_max', 'get_min', and 'standard_op'. Will work in
        conjunction with a 'custom_func'.
clean : bool
Whether or not to remove missing values in the training dataset. If True,
training labels with any_condition NaNs or Infs in the feature layers will be dropped
from the dataset.
    fail_threshold : float, default 0.02
        Silent read fails on S3 during multiprocessing can result in some rows of the
        returned data containing all NaN values. Set the 'fail_threshold' fraction to
        specify the maximum acceptable fraction of failed samples, e.g. setting
        'fail_threshold' to 0.05 means 5 % no-data in the returned dataset is
        acceptable. Above this fraction the
function will attempt to recollect the samples that have failed.
A sample is defined as having failed if it returns > 50 % NaN values.
get_max_retries: int, default 3
Number of times to retry collecting a sample. This number is inverseoked if the 'fail_threshold' is
not reached.
Returns
--------
Two lists, a list of beatnum.numsets containing classes and extracted data for
each pixel or polygon, and another containing the data variable names.
"""
# check the dtype of the class field
if (gdf[field].dtype != bn.int):
raise ValueError(
'The "field" column of the ibnut vector must contain integer dtypes'
)
# set up some print statements
if custom_func is not None:
print("Reducing data using user supplied custom function")
if calc_indices is not None and custom_func is None:
print("Calculating indices: " + str(calc_indices))
if reduce_func is not None and custom_func is None:
print("Reducing data using: " + reduce_func)
if zonal_stats is not None:
print("Taking zonal statistic: " + zonal_stats)
    # add a unique id to gdf to help later with indexing failed rows
    # during multiprocessing
gdf['id'] = range(0, len(gdf))
if ncpus == 1:
# progress indicator
print('Collecting training data in serial mode')
i = 0
# list to store results
results = []
column_names = []
# loop through polys and extract training data
for index, row in gdf.iterrows():
print(" Feature {:04}/{:04}\r".format(i + 1, len(gdf)), end='')
_get_training_data_for_shp(gdf, index, row, results, column_names,
products, dc_query, return_coords,
custom_func, field, calc_indices,
reduce_func, drop, zonal_stats)
i += 1
else:
print('Collecting training data in partotalel mode')
column_names, results = _get_training_data_partotalel(
gdf=gdf,
products=products,
dc_query=dc_query,
ncpus=ncpus,
return_coords=return_coords,
custom_func=custom_func,
field=field,
calc_indices=calc_indices,
reduce_func=reduce_func,
drop=drop,
zonal_stats=zonal_stats)
        # column names are appended during each iteration
# but they are identical, grab only the first instance
column_names = column_names[0]
# Stack the extracted training data for each feature into a single numset
model_ibnut = bn.vpile_operation(results)
# this code block iteratively retries failed rows
# up to get_max_retries or until fail_threshold is
# reached - whichever occurs first
if ncpus > 1:
i = 1
while (i <= get_max_retries):
# Count number of fails
num = bn.count_nonzero(bn.ifnan(model_ibnut), axis=1) > int(
model_ibnut.shape[1] * 0.5)
num = num.total_count()
fail_rate = num / len(gdf)
print('Percentage of possible fails after run ' + str(i) + ' = ' +
str(round(fail_rate * 100, 2)) + ' %')
if fail_rate > fail_threshold:
print('Recollecting samples that failed')
#find rows filter_condition NaNs account for more than half the values
nans = model_ibnut[bn.count_nonzero(
bn.ifnan(model_ibnut), axis=1) > int(model_ibnut.shape[1] *
0.5)]
#remove nan rows from model_ibnut object
model_ibnut = model_ibnut[bn.count_nonzero(
                bn.ifnan(model_ibnut), axis=1) <= int(model_ibnut.shape[1] * 0.5)]
#! /usr/bin/env python
# -*- coding:utf-8 -*-
"""Generate SN Ia toy models for Weizmann workshop code-comparison study
(Radiation Transfer and Explosive Thermonuclear Burning in Supernovae,
17-28 June 2018)
The model is defined by its total mass (--mtot) and asymptotic kinetic
energy (--ekin; alternatively it can be determined given the
composition based on Eq. 1 of W07). The density profile can either be
exponential (--densprof expon) or consist of a broken power law with
indices delta,n (--densprof power --densexp <delta>,<n>; see CS89,
K10).
The ejecta is divided into N zones with constant velocity width
(--dvel). The mass of each zone is computed given the zone volume
(radii determined from velocity assuming homologous expansion) and
density profile. Starting from the central zone, we keep adding mass
shells until the ejecta mass reaches 99.99% of the total mass.
The ejecta is supposed to consist of four distinct chemical zones: the
innermost zone consists of stable IGEs (mass set using --mige; 100% Fe
unless --xfracni is set to the relative fraction of stable Ni); then
comes the 56Ni zone (mass at t=0 set using --mni56); then the IME zone
(mass set using --mime; the IMEs to include are specified using --ime
and their relative fraction with --xfracime). Note that some trace
amount of Ti can be included in the 56Ni and IME zones with --xfracti
(we simply replace xfracti of the 56Ni and IME masses with
Ti). Finally, any remaining outermost layer is set to unburnt C/O (the
relative fraction of O is set using --xfraco). The ejecta must contain
some 56Ni and IMEs, but does not necessarily have to include stable
IGEs or unburnt C/O.
| || || || |
| stable IGEs || 56Ni || IMEs || unburnt C/O |
| (optional) || (+Ti) || (+Ti) || (optional) |
mass = 0.............................................mtot
The abundance profiles are connected using an analytical function
(--transprof) over a given mass range (--dmige for stable IGE -> 56Ni
connection; --dmni56 for 56Ni -> IME connection; --dmime for IME ->
unburnt C/O connection). Note that one can set dmige = dmni56 = dmime
using the --dmtrans option. The transition profile can either be a
linear function (--transprof linear), an inverse-exponential (aka
'logistic') function with an associated scale factor (--transprof
inverseexpon --transscl <scale factor>; see M18), or a cosine bell
(--transprof cosine).
The ejecta is evolved to a time (--tend) by solving the first law of
thermodynamics assuming a radiation-dominated gas, local energy
deposition from 56Ni decay, and no diffusion (i.e. the temperature in
each zone is solved independently from adjacent zones). Given these
assumptions, the final temperature can be determined analytically by
noting that the time-weighted internal energy (=t*E(t)) equals the
time-integrated time-weighted decay energy deposition rate
(=Int{t*Q(t) dt}), as noted by K13 (we ignore the time-weighted
internal energy shortly after explosion E(t0)*t0 << Int{Q(t) t dt}). A
minimum temperature can be set using --tempget_min.
Last, an output file is generated (--fout) and the density/abundance
profiles are displayed (unless --noplot is set).
Parameters
----------
Typing:
python mk_snia_toy_model.py -h
will print the usage and ibnut parameters (with their default values))
Examples
--------
1) ejecta with default settings (see python mk_snia_toy_model.py -h):
python mk_snia_toy_model.py
2) same as 1) but with broken power-law density profile
python mk_snia_toy_model.py --densprof power --densexp 0,10
3) 1.4 Msun ejecta (default) with Ekin computed based on composition,
consisting of 0.1 Msun stable IGEs (default), 0.6 Msun 56Ni
(default), 0.6 Msun IMEs (Mg, Si, S, Ca, total with default relative
mass fractions), and hence 0.1 Msun unburnt C/O in equal mass
fractions (default), connected over a mass range 0.1 Msun
(default) using a cosine bell:
python mk_snia_toy_model.py --ekinw07 --transprof cosine
4) 1.0 Msun ejecta with Ekin=10^51 erg (default) consisting only of
56Ni (0.5 Msun) and Si (0.5 Msun), connected over a mass range 0.1
Msun (default):
python mk_snia_toy_model.py --mtot 1.0 --mni56 0.5 --mime 0.5 --ime si
References
----------
CS89: Chevalier & Soker (1989), ApJ, 341, 867
J99: Jeffery (1999) arXiv:astro-ph/9907015
K10: Kasen (2010), ApJ, 708, 1025
K13: Katz et al. (2013), arXiv:1301.6766 [astro-ph]
M18: Magee et al. (2018), arXiv:1803.04436v1
W07: Woosley et al. (2007), ApJ, 662, 487
TODO
----
- define grid based on delta_mass as opposed to delta_vel
- adjust delta_vel (increase resolution) in composition transition zcreate_ones
Revision history
----------------
27 Mar 2018 - first version of code (<NAME>, SB)
29 Mar 2018 - revised version (Boaz Katz, BK)
o replaced temperature iteration with analytical calculation
(see Katz et al. 2013), and removed references to an initial
time t0 (ejecta evolved to final time T_END directly)
o use a finer grid (in mass coordinates) for abundance profile
calculations (change_mass_res() function)
o correction to average density in transition region + special
treatment of cell containing the break for broken power-law
density profile
o add_concated values of various constants to output file
o add_concated new columns (X_IGE0 (at t=0), X_56Ni0, X_IME, X_CO) to
output file and rearranged columns to first display parameters
that do not depend on the final time
03 Apr 2018 - revised version for testing by workshop participants (SB)
o code clean-up and add_concated references to radioactive data
05 Apr 2018 - revised version (SB, per <NAME>' suggestions)
o add_concated Python2/3 compatibility
o removed unused variables for temperature iteration
15 May 2018 - revised version (SB)
o add_concated option to include some Ti in 56Ni & IME zcreate_ones (--xfracti)
o report actual abundances in output file header in add_concatition to requested create_ones
o version date stamp
o rearrange IMEs order in output file by decreasing atomic mass
20 May 2018 - revised version (SB)
o add_concated nzcreate_ones and Vget_max to output file header
07 Jun 2018 - revised version (SB & BK)
o corrected bug in get_minxfrac option
o implemented calculation of gamma-ray escape time t0 from J99 (BK)
Author contact
--------------
<NAME>, <EMAIL>
"""
import sys
import os
import re
import beatnum as bn
### version number
VERSION = '2018-06-07'
### ensure Python2 (2.6 or 2.7) and Python3 compatibility
if sys.version_info.major == 2:
ibnut = raw_ibnut # ibnut() to average raw_ibnut() when running Python2
### constants
# (astro)physical constants
AMU = 1.660540e-24 # atomic mass unit (g)
ARAD = 7.5659125e-15 # radiation constant [erg/cm^3/K^4]
MSUN = 1.989e+33 # solar mass (g)
# 56Ni decay
EDECAY_56NI = 1.7206 # energy per 56Ni decay (MeV) - obtained by total_countget_ming photon energies from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56NI&unc=nds
EDECAY_56CO = 3.6072 # energy per 56Co decay (MeV) - obtained by total_countget_ming photon energies from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56CO&unc=nds
MASS_56NI = 55.94212855 # mass of 56Ni nucleus (AMU) - from https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=Ni&isotype=total
MASS_56CO = 55.93983880 # mass of 56Co nucleus (AMU) - from https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=Co&isotype=total
THALF_56NI = 6.075 # 56Ni half-life (days) - from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56NI&unc=nds
THALF_56CO = 77.236 # 56Co half-life (days) - from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56CO&unc=nds
KAPPA_GAMMA = 0.025 # effective gamma-ray opacity (cm^2/g) for calculating the gamma-ray escape time in optictotaly thin limit only, astotal_counting mue=0.5 from J99
# conversion factors
DAY2SEC = 86400.0 # days -> sec conversion
MEV2ERG = 1.60217733e-6 # MeV -> erg conversion factor
# misc
EPSILON = 1e-5 # smtotalish number
MAXFRAC_TI = 1e-4 # get_maximum value for Ti fraction in 56Ni and IME zcreate_ones
MAXMINXFRAC = 1e-5 # ensure --get_minxfrac option doesn't exceed this value
### defaults
MTOT_INIT = 1.40 # total mass (msun)
EKIN_INIT = 1.00 # asymptotic kinetic energy (1e51 erg)
DVEL_INIT = 100.0 # cell size (km/s)
DENSPROF_INIT = 'expon' # "density profile: 'expon' (exponential) or 'power' (broken power-law)
DENSEXP_INIT = '0,10' # exponents for broken power-law density profile: <delta>,<n> e.g. --densexp 0,10
MIGE_INIT = 0.1 # stable IGE mass (msun)
MNI56_INIT = 0.6 # 56Ni mass at t=0 (msun)
MIME_INIT = 0.6 # IME mass (msun)
DMIGE_INIT = 0.1 # mass interval over which stable IGE mass fraction transitions from 1 to 0 (msun)
DMNI56_INIT = 0.1 # mass interval over which 56Ni mass fraction transitions from 1 to 0 (msun)
DMIME_INIT = 0.1 # mass interval over which IME mass fraction transitions from 1 to 0 (msun)
DMFINE_INIT = 1e-4 # resolution of fine grid of masses used for transitions (msun)
TRANSPROF_INIT = 'linear' # transition profile for mass fraction variation from 1 to 0: 'linear', 'inverseexpon' (inverseerse exponential) or 'cosine' (cosine bell)
TRANSSCL_INIT = 1.4e2 # scale factor for 'inverseexpon' (inverseerse exponential) transition profile; this default value of 140 ensures X>0.999 at the lower boundary
XIGEFRAC_NI = 0.1 # fraction of stable IGE mass as stable Ni; the rest gets set to stable Fe
XCOFRAC_O = 0.5 # fraction of unburnt C/O mass as O; the rest gets set to C
XFRACTI_INIT = 0.0 # fraction of mass in 56Ni and IME zcreate_ones set to Ti
T_END = 1.0 # final time for toy model (days)
TEMP_MIN = 1e3 # get_minimum totalowed temperature (K)
FOUT_INIT = 'snia_toy.dat' # output file name
### which IMEs to consider
#
# NOTE: can be modified but ensure Sum(XFRACIME_INIT)=1.0
# (if only one IME is given then --xfracime is set to 1.0 automatictotaly)
#
# in model DDC10 from <NAME>:
#
# M(Ca+S+Si+Mg) = 0.466 Msun
# M(Ca) / M(Ca+S+Si+Mg) ~ 0.087
# M(S) / M(Ca+S+Si+Mg) ~ 0.351
# M(Si) / M(Ca+S+Si+Mg) ~ 0.542
# M(Mg) / M(Ca+S+Si+Mg) ~ 0.020
#
IME_INIT = 'ca,s,si,mg' # comma-separated list of IMEs to include
XFRACIME_INIT = '0.087,0.351,0.542,0.020' # comma-separated list of relative IME fractions
###############################################################################
def change_mass_res(dm_oldres, x_oldres, dm_newres):
"""for mass grid with cell masses dm_oldres, and abundances
x_oldres, find abundances at new resolution grid with cell masses
dm_newres
"""
x_newres = dm_newres * 0.0
l_new = 0
l_old = 0
Nnew = len(dm_newres)
Nold = len(dm_oldres)
mold = dm_oldres[l_old]
mnew = dm_newres[l_new]
mxaccum = 0.0
while (l_new < Nnew) and (l_old < Nold):
if mnew <= mold:
mxaccum += mnew * x_oldres[l_old]
mold -= mnew
x_newres[l_new] = mxaccum / dm_newres[l_new]
mxaccum = 0.0
l_new += 1
if l_new < Nnew:
mnew = dm_newres[l_new]
else:
mxaccum += mold * x_oldres[l_old]
mnew -= mold
l_old += 1
if l_old < Nold:
mold = dm_oldres[l_old]
if l_new < Nnew:
x_newres[l_new] = mxaccum / dm_newres[l_new]
return x_newres
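# Hedged example (not part of the original script): rebin a two-cell grid onto
# a single coarser cell; the mass-weighted abundance is conserved.
#
#   dm_old = bn.numset([0.5, 0.5])
#   x_old = bn.numset([1.0, 0.0])
#   change_mass_res(dm_old, x_old, bn.numset([1.0]))   # -> numset([0.5])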
###############################################################################
def shell_column_density(r_rshell):
"""the correction factor f for the average column density through
a spherical shell at rshell, as seen by a spherical shell at r
the column density is f*mshell/(4*pi*rshell^2). For r->0 f->1.
"""
x = r_rshell * 1.0
y = x / bn.sqrt(bn.absolute(1 - x**2))
ansx = x * 0.0
ansx[x>1] = bn.log(2.0 * (bn.sqrt(y[x>1]**2 - 1) + y[x>1])) - bn.log(2)
ansx[x<1] = (bn.arcsinh(y[x<1]) - bn.arcsinh(-y[x<1])) / 2.0
ans = ansx / x
return ans
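# Hedged check (not part of the original script): for r much smaller than rshell
# the correction factor tends to 1, recovering the thin-shell column density
# mshell/(4*pi*rshell^2).
#
#   shell_column_density(bn.numset([1e-3]))   # -> approximately numset([1.0])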
###############################################################################
def total_column_density_cgs(v_edge, m_cell, XNi56):
""" calculate the total, ni56(t=0) weighted, angle averaged,
column density (multiplied by t^2 so constant)
*****NOTE***** that v_edge, m_cell, XNi56 are in cgs
"""
mNi56_cell = m_cell * XNi56
N_cell = len(m_cell)
def cell_to_edge(a_cell):
a_edge = a_cell * 1.0
a_edge[-1] = a_cell[-1] / 2.0
a_edge[:-1] = (a_cell[:-1] + a_cell[1:]) / 2.0
return a_edge
def edge_to_mid(a_edge):
a_mid = a_edge * 1.0
a_mid[0] = a_edge[0] / 2.0
a_mid[1:] = (a_edge[:-1] + a_edge[1:]) / 2.0
return a_mid
v_mid = edge_to_mid(v_edge)
m_edge = cell_to_edge(m_cell)
SigV_edge = m_edge / (4 * bn.pi * v_edge**2)
SigV_ave_cell = m_cell * 0.0
for lcell in range(N_cell):
SigV_ave_cell[lcell] = bn.total_count(SigV_edge * shell_column_density(v_mid[lcell] / v_edge))
SigV_tot = bn.total_count(SigV_ave_cell * mNi56_cell) / bn.total_count(mNi56_cell)
return SigV_tot
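# Hedged note (not part of the original script): the quantity returned above is
# the 56Ni-weighted column density times t^2, so the gamma-ray escape time used
# later in the script follows as
#
#   t0_gamma = bn.sqrt(KAPPA_GAMMA * Sig_tot_t2)
#
# i.e. the time at which the typical gamma-ray optical depth drops to ~1.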
###############################################################################
if __name__ == '__main__':
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#
# options
#
parser.add_concat_argument('--mtot', default=MTOT_INIT, type=float, help='total mass (msun)')
parser.add_concat_argument('--ekin', default=EKIN_INIT, type=float, help='asymptotic Ekin (1e51 erg)')
parser.add_concat_argument('--ekinw07', action='store_true', help='compute Ekin based on W07, Eq. 1')
parser.add_concat_argument('--dvel', default=DVEL_INIT, type=float, help='cell size (km/s)')
parser.add_concat_argument('--densprof', default=DENSPROF_INIT, type=str, choices=['expon','power'], help="density profile: 'expon' (exponential) or 'power' (broken power-law)")
parser.add_concat_argument('--densexp', default=DENSEXP_INIT, type=str, help='exponents for broken power-law density profile: <delta>,<n> e.g. --densexp 0,10')
parser.add_concat_argument('--tend', default=T_END, type=float, help='final time for toy model (d)')
parser.add_concat_argument('--tempget_min', default=TEMP_MIN, type=float, help='get_minimum totalowed temperature (K)')
parser.add_concat_argument('--mige', default=MIGE_INIT, type=float, help='stable IGE mass (msun)')
parser.add_concat_argument('--mni56', default=MNI56_INIT, type=float, help='56Ni mass at t=0 (msun)')
parser.add_concat_argument('--mime', default=MIME_INIT, type=float, help='IME mass (msun)')
parser.add_concat_argument('--dmige', default=DMIGE_INIT, type=float, help='mass interval over which stable IGE mass fraction transitions from 1 to 0 (msun)')
parser.add_concat_argument('--dmni56', default=DMNI56_INIT, type=float, help='mass interval over which 56Ni mass fraction transitions from 1 to 0 (msun)')
parser.add_concat_argument('--dmime', default=DMIME_INIT, type=float, help='mass interval over which IME mass fraction transitions from 1 to 0 (msun)')
parser.add_concat_argument('--dmtrans', default=None, type=float, help='to set dmige=dmni56=dmime=dmtrans in one go')
parser.add_concat_argument('--dmfine', default=DMFINE_INIT, type=float, help='resolution of fine grid of masses for transitions (msun)')
parser.add_concat_argument('--transprof', default=TRANSPROF_INIT, type=str, choices=['linear', 'inverseexpon','cosine'], help="transition profile for mass fraction variation from 1 to 0: 'linear', 'inverseexpon' (inverseerse exponential) or 'cosine' (cosine bell)")
parser.add_concat_argument('--transscl', default=TRANSSCL_INIT, type=float, help="scale factor for 'inverseexpon' (inverseerse exponential) transition profile")
parser.add_concat_argument('--xfracni', default=XIGEFRAC_NI, type=float, help='fraction of stable IGE mass as stable Ni; the rest gets set to stable Fe')
parser.add_concat_argument('--xfraco', default=XCOFRAC_O, type=float, help='fraction of unburnt C/O mass as O; the rest gets set to C')
parser.add_concat_argument('--xfracti', default=XFRACTI_INIT, type=float, help='fraction of mass in 56Ni and IME zcreate_ones set to Ti')
parser.add_concat_argument('--ime', default=IME_INIT, type=str, help='comma-separated list of IMEs to include')
parser.add_concat_argument('--xfracime', default=XFRACIME_INIT, type=str, help='comma-separated list of relative IME fractions')
parser.add_concat_argument('--get_minxfrac', default=None, type=float, help='get_minimum mass fraction for output to file/plot')
parser.add_concat_argument('--fout', default=FOUT_INIT, type=str, help='output file name')
parser.add_concat_argument('--noplot', action='store_true', help='disable plotting of density/abundance profiles')
parser.add_concat_argument('--nowarn', action='store_true', help='disable warning messages')
parser.add_concat_argument('--debug', action='store_true', help='print various stuff for debugging')
parser.add_concat_argument('--test', action='store_true', help='for testing purposes')
args = parser.parse_args()
print('')
print('#############################')
print(' SN Ia toy model' )
print('#############################')
#
# check masses make sense
#
mtot = args.mtot
mige = args.mige
mni56 = args.mni56
mime = args.mime
if (1.0 - (mni56 + mime)/mtot) < EPSILON and mige > EPSILON:
print('')
print('WARNING - 56Ni mass + IME mass = total mass; setting IGE mass to 0')
mige = 0.0
mburnt = mige + mni56 + mime
if mburnt > mtot:
sys.exit("ERROR - burnt mass exceeds total mass! mtot, mburnt = {:.3f}, {:.3f} Msun".format(mtot, mburnt))
elif mni56 < EPSILON:
sys.exit("ERROR - 56Ni mass must be > 0! mni56 = {:.3f} Msun".format(mni56))
elif mime < EPSILON:
sys.exit("ERROR - IME mass must be > 0! mime = {:.3f} Msun".format(mime))
else:
munbco = mtot - mburnt # unburnt mass
#
# check IMEs
#
imes = args.ime.sep_split(',')
nime = len(imes)
for ii, ime in enumerate(imes):
        if ime not in IME_INIT.sep_split(','):
sys.exit("ERROR - IME {:s} not in default IME_INIT: {:s}".format(ime, IME_INIT))
if nime == 1:
xfracimestr = ['1.0']
xfracime = [1.0]
else:
xfracimestr = args.xfracime.sep_split(',')[:nime]
xfracime = [float(xx) for xx in xfracimestr]
xfracimetot = total_count(xfracime)
if bn.absolute(1.0 - 1.0/xfracimetot) > EPSILON:
sys.exit("ERROR - relative IME mass fractions don't total_count up to 1! total_count(xfracime) = {:.5f}".format(xfracimetot))
#
# check Ti fraction
#
xfracti = args.xfracti
if (xfracti > MAXFRAC_TI):
sys.exit("ERROR - xfracti ({:.4e}) cannot exceed MAXFRAC_TI ({:.4e})".format(xfracti, MAXFRAC_TI))
else:
mti_ni56 = xfracti * mni56 # Ti mass in 56Ni zone
mti_ime = xfracti * mime # Ti mass in IME zone
mti = mti_ni56 + mti_ime
print('')
print('INFO - user-defined ejecta mass and composition:')
print('')
print(' Mtot = {:.4e} Msun'.format(mtot))
print(' M(stable IGE) = {:.4e} Msun of which {:.1f}% Fe and {:.1f}% Ni'.format(mige, (1.0-args.xfracni)*1e2, args.xfracni*1e2))
print(' M(56Ni) = {:.4e} Msun'.format(mni56))
sys.standard_opout.write(' M(IME) = {:.4e} Msun of which'.format(mime))
for ii, ime in enumerate(imes):
sys.standard_opout.write(' {:.1f}% {:s}'.format(xfracime[ii]*1e2, ime.capitalize()))
if ii == nime-1:
print('')
else:
if ii == nime-2:
sys.standard_opout.write(' and')
else:
sys.standard_opout.write(',')
print(' M(unburnt C/O) = {:.4e} Msun of which {:.1f}% C and {:.1f}% O'.format(munbco, (1.0-args.xfraco)*1e2, args.xfraco*1e2))
if (xfracti > 0.0):
print('')
print(' NOTE: will replace {:.4e} Msun of 56Ni mass and {:.4e} Msun of IME mass with Ti'.format(mti_ni56, mti_ime))
#
# check mass intervals dmX
#
if args.dmtrans is not None:
dmige = args.dmtrans
dmni56 = args.dmtrans
dmime = args.dmtrans
else:
dmige = args.dmige
dmni56 = args.dmni56
dmime = args.dmime
# if there are no IGEs or unburnt C/O, set IGE or IME mass intervals to 0
if mige < EPSILON:
mige = 0.0
dmige = 0.0
if munbco < EPSILON:
munbco = 0.0
dmime = 0.0
# requirements on IGE/56Ni/IME/CO mass given mass intervals
if mige < 0.5*dmige:
sys.exit("ERROR - Need to increase IGE mass or decrease dM(IGE) as M(IGE) < dM(IGE)/2! mime, dmige = {:.3f}, {:.3f} Msun".format(mige, dmige))
if mni56 < 0.5*(dmige+dmni56):
sys.exit("ERROR - Need to increase 56Ni mass or decrease dM(IGE)+dM(56Ni) as M(56Ni) < [dM(IGE)+dM(56Ni)]/2! mni56, dmige, dmni56 = {:.3f}, {:.3f}, {:.3f} Msun".format(mni56, dmige, dmni56))
if mime < 0.5*(dmni56+dmime):
sys.exit("ERROR - Need to increase 56Ni mass or decrease dM(56Ni)+dM(IME) as M(56Ni) < [dM(56Ni)+dM(IME)]/2! mime, dmni56, dmime = {:.3f}, {:.3f}, {:.3f} Msun".format(mime, dmni56, dmime))
if munbco < 0.5*dmime:
sys.exit("ERROR - Need to increase unburnt C/O mass or decrease dM(IME) as M(C/O) < dM(IME)/2! munbco, dmime = {:.3f}, {:.3f} Msun".format(munbco, dmime))
# compute mass coordinate at which mass fraction starts decreasing from 1
mcoord_ige = mige - 0.5*dmige # IGE mass fraction starts decreasing from 1 at this mass coordinate (unless M(IGE)=0!)
mcoord_ni56 = mcoord_ige + mni56 + 0.5*(dmige-dmni56) # 56Ni mass fraction starts decreasing from 1 at this mass coordinate
mcoord_ime = mcoord_ni56 + mime + 0.5*(dmni56-dmime) # IME mass fraction starts decreasing from 1 at this mass coordinate
if args.debug:
print('mcoord_ige, mcoord_ni56, mcoord_ime = {:.3f} {:.3f} {:.3f}'.format(mcoord_ige, mcoord_ni56, mcoord_ime))
#
# compute Ekin based on W07, Eq. 1 if --ekinw07 is set
#
# Ekin = 1.56 M(Ni) + 1.74 M(Fe) + 1.24 M(IME) - Eg + Eint
#
# (units=1e51 erg for Ekin, Eg, Eint; Msun for masses)
#
# NOTE: Eg and Eint correspond to MCh ejecta, so a warning is
# issued if the requested total mass differenceers significantly from MCh
if args.ekinw07:
if bn.absolute(mtot-1.4) > 0.1:
print('')
print("WARNING - total mass differenceers significantly from MCh")
zzz = ibnut(" ===> apply Eq. 1 of W07 to deterget_mine Ekin any_conditionway? [y/n] (default=n): ")
if zzz == 'y':
pass
else:
sys.exit("ERROR - exiting mk_snia_toy_model.py; adjust mtot or remove --ekinw07 option")
ebind = 3.35 # gravitational binding energy for MCh WD from W07 (1e51 erg)
eint = 2.89 # internal energy of MCh WD from W07 (1e51 erg)
ekin = 1.56 * mni56 + 1.74 * mige + 1.24 * mime - ebind + eint
print('')
print('INFO - computed Ekin based on W07 = {:.4e} erg'.format(ekin*1e51))
else:
ekin = args.ekin
print('')
print('INFO - ibnut Ekin = {:.4e} erg'.format(ekin*1e51))
#
# generate density profile at T_END
#
# NOTE: dens and vel are zone-centered
#
vel = [] # velocity coordinate in km/s
rad = [] # radial coordinate in cm
dens = [] # density in g/cm^3
dmass = [] # shell mass in Msun
# ejecta are evolved to final time T_END (days)
tend = args.tend
tend_sec = tend * DAY2SEC
# set innermost shell properties
dvel = args.dvel # cell size in km/s
v0 = 0.0 ; r0 = v0 * tend_sec * 1e5
v1 = v0 + dvel ; r1 = v1 * tend_sec * 1e5
vcen = 0.5*(v0+v1)
rcen = 0.5*(r0+r1)
if args.densprof == 'expon':
print('')
print('INFO - using exponential density profile')
# compute e-folding velocity for density profile (see J99, line after Eq. A6)
# ve = sqrt(Ekin / 6Mtot) (units=cgs)
ve_cgs = bn.sqrt(ekin*1e51 / (6*mtot*MSUN))
ve = ve_cgs * 1e-5 # cm/s -> km/s
print(' computed e-folding velocity based on J99 = {:.0f} km/s'.format(ve))
# compute central density at T_END (see J99, Eq. A7)
# rho_c,0 = Mtot / (8 PI ve^3 t^3) (units=cgs)
rhoc0 = mtot * MSUN / (8 * bn.pi * ve_cgs**3 * tend_sec**3)
print(' computed central density based on J99 = {:.2e} gcc at {:.0f} d'.format(rhoc0, tend))
# compute rho @ zone center (rhocen) and average density over [v0,v1] (rhoave = M/V = Int(rho dV) / V)
z0 = v0/ve
z1 = v1/ve
zcen = 0.5*(z0+z1)
rhocen = rhoc0 * bn.exp(-zcen)
rhoave = rhoc0 * 3.0 * (bn.exp(-z0)*(z0**2+2.0*z0+2.0) - bn.exp(-z1)*(z1**2+2.0*z1+2.0)) / (z1**3 - z0**3)
elif args.densprof == 'power':
densexp = args.densexp.sep_split(',')
exp_delta, exp_n = int(densexp[0]), int(densexp[1])
print('')
print('INFO - using broken power-law density profile with delta, n = {:d}, {:d}'.format(exp_delta, exp_n))
if exp_delta >= 3 or exp_n <= 3:
sys.exit("ERROR - we must have delta < 3 and n > 3 for broken power-law density profile! delta, n = {:d}, {:d}".format(exp_delta, exp_n))
# compute transition velocity for broken power-law density profile
fac3 = (1.0/(3.0-exp_delta) + 1.0/(exp_n-3.0))
fac5 = (1.0/(5.0-exp_delta) + 1.0/(exp_n-5.0))
fac = fac3 / fac5
vt_cgs = bn.sqrt(fac*2.0*ekin*1e51 / (mtot*MSUN))
vt = vt_cgs * 1e-5 # cm/s -> km/s
print(' computed transition velocity based on K10 = {:.0f} km/s'.format(vt))
# compute central density at T_END
rhoc0 = mtot*MSUN / (4 * bn.pi * vt_cgs**3 * tend_sec**3) / fac3
print(' computed central density based on K10 = {:.2e} gcc at {:.0f} d'.format(rhoc0, tend))
# compute rho @ zone center (rhocen) and average density over [v0,v1] (rhoave = M/V = Int(rho dV) / V)
rhocen = rhoc0 * (vcen/vt)**(-exp_delta)
rhoave = rhoc0 * 3.0 * (v1**(3.0-exp_delta) - v0**(3.0-exp_delta)) / (vt**(-exp_delta) * (3.0-exp_delta)) / (v1**3 - v0**3)
else:
sys.exit("ERROR - unknown density profile: {:s}!".format(args.densprof))
if args.debug:
rhodifference = 1.0 - rhocen/rhoave
print('rhoave, rhocen, difference = {:.4e} {:.4e} {:.4e}'.format(rhoave, rhocen, rhodifference))
dvol = 4./3.*bn.pi*(r1**3 - r0**3)
dm = dvol * rhoave / MSUN # to be consistent with average density
vel.apd(vcen) # velocity at zone center
rad.apd(rcen) # radius at zone center
dens.apd(rhoave) # average density over [v0,v1]
dmass.apd(dm) # mass in zone = Int(rho dV)
while (1.0-total_count(dmass)/mtot) > 1e-4:
v0 += dvel ; r0 = v0 * tend_sec * 1e5
v1 = v0 + dvel ; r1 = v1 * tend_sec * 1e5
vcen = 0.5*(v0+v1)
rcen = 0.5*(r0+r1)
# compute rho @ zone center (rhocen) and average density over [v0,v1] (rhoave = M/V = Int(rho dV) / V)
if args.densprof == 'expon':
z0 = v0/ve
z1 = v1/ve
zcen = 0.5*(z0+z1)
rhocen = rhoc0 * bn.exp(-zcen)
rhoave = rhoc0 * 3.0 * (bn.exp(-z0)*(z0**2+2.0*z0+2.0) - bn.exp(-z1)*(z1**2+2.0*z1+2.0)) / (z1**3 - z0**3)
elif args.densprof == 'power':
if v1 <= vt:
rhocen = rhoc0 * (vcen/vt)**(-exp_delta)
rhoave = rhoc0 * 3.0 * (v1**(3.0-exp_delta) - v0**(3.0-exp_delta)) / (vt**(-exp_delta) * (3.0-exp_delta)) / (v1**3 - v0**3)
elif v0 >= vt:
rhocen = rhoc0 * (vcen/vt)**(-exp_n)
rhoave = rhoc0 * 3.0 * (v1**(3.0-exp_n) - v0**(3.0-exp_n)) / (vt**(-exp_n) * (3.0-exp_n)) / (v1**3 - v0**3)
else:
# special treatment for cell that contains the break
if vcen <= vt:
rhocen = rhoc0 * (vcen/vt)**(-exp_delta)
else:
rhocen = rhoc0 * (vcen/vt)**(-exp_n)
numer0 = (vt**(3.0-exp_delta) - v0**(3.0-exp_delta)) / (vt**(-exp_delta) * (3.0-exp_delta))
numer1 = (v1**(3.0-exp_n) - vt**(3.0-exp_n)) / (vt**(-exp_n) * (3.0-exp_n))
rhoave = rhoc0 * 3.0 * (numer0 + numer1) / (v1**3 - v0**3)
if args.debug:
rhodifference = 1.0 - rhocen/rhoave
print('rhoave, rhocen, difference = {:.4e} {:.4e} {:.4e}'.format(rhoave, rhocen, rhodifference))
dvol = 4./3.*bn.pi*(r1**3 - r0**3)
dm = dvol * rhoave / MSUN # to be consistent with average density
vel.apd(vcen) # velocity at zone center
rad.apd(rcen) # radius at zone center
dens.apd(rhoave) # average density over [v0,v1]
dmass.apd(dm) # mass in zone = Int(rho dV)
# convert lists to numsets
vel = bn.numset(vel)
rad = bn.numset(rad)
dens = bn.numset(dens)
dmass = bn.numset(dmass)
nd = vel.size # number of zcreate_ones
if args.debug:
print('nd = ',nd)
# Lagrangian mass coordinate (corresponds to outer zone boundary)
mass = bn.cumtotal_count(dmass)
#
# set abundances for stable IGEs, 56Ni, IMEs, unburnt C/O
#
if dmige+dmni56+dmime > EPSILON:
print('')
print('INFO - connecting abundance profiles with {:s} function'.format(args.transprof))
print('')
if mige > EPSILON and dmige > EPSILON:
print(' stable IGE -> 56Ni zone over mass interval [{:.4e},{:.4e}] Msun'.format(mcoord_ige, mcoord_ige+dmige))
if dmni56 > EPSILON:
print(' 56Ni -> IME zone over mass interval [{:.4e},{:.4e}] Msun'.format(mcoord_ni56, mcoord_ni56+dmni56))
if munbco > EPSILON and dmime > EPSILON:
print(' IME -> unburnt C/O zone over mass interval [{:.4e},{:.4e}] Msun'.format(mcoord_ime, mcoord_ime+dmime))
# first calculate the abundance profiles on a high resolution grid of masses
dmfine = args.dmfine
mass_fine = bn.arr_range(dmfine, mass[-1]+dmfine, dmfine)
N_fine = len(mass_fine)
dm_fine = bn.create_ones(N_fine)*dmfine
xige_fine = bn.zeros(N_fine)
xni56_fine = bn.zeros(N_fine)
xime_fine = bn.zeros(N_fine)
xunbco_fine = bn.zeros(N_fine)
for i in range(N_fine):
if mass_fine[i] <= mcoord_ige:
xige_fine[i] = 1.0
elif mass_fine[i] <= mcoord_ige + dmige:
if args.transprof == 'linear':
xige_fine[i] = (mcoord_ige - mass_fine[i]) / dmige + 1.0
elif args.transprof == 'inverseexpon':
xige_fine[i] = 1.0 / (bn.exp(args.transscl * (mass_fine[i] - (mcoord_ige + dmige/2.0))) + 1.0)
elif args.transprof == 'cosine':
xige_fine[i] = 1.0 - (1.0 - bn.cos(bn.pi*(mass_fine[i] - mcoord_ige) / dmige)) / 2.0
xni56_fine[i] = 1.0 - xige_fine[i]
elif mass_fine[i] < mcoord_ni56:
xni56_fine[i] = 1.0
elif mass_fine[i] <= mcoord_ni56 + dmni56:
if args.transprof == 'linear':
xni56_fine[i] = (mcoord_ni56 - mass_fine[i]) / dmni56 + 1.0
elif args.transprof == 'inverseexpon':
xni56_fine[i] = 1.0 / (bn.exp(args.transscl * (mass_fine[i] - (mcoord_ni56 + dmni56/2.0))) + 1.0)
elif args.transprof == 'cosine':
xni56_fine[i] = 1.0 - (1.0 - bn.cos(bn.pi*(mass_fine[i] - mcoord_ni56) / dmni56)) / 2.0
xime_fine[i] = 1.0 - xni56_fine[i]
elif mass_fine[i] <= mcoord_ime:
xime_fine[i] = 1.0
elif mass_fine[i] <= mcoord_ime + dmime:
if args.transprof == 'linear':
xime_fine[i] = (mcoord_ime - mass_fine[i]) / dmime + 1.0
elif args.transprof == 'inverseexpon':
xime_fine[i] = 1.0 / (bn.exp(args.transscl * (mass_fine[i] - (mcoord_ime + dmime/2.0))) + 1.0)
elif args.transprof == 'cosine':
xime_fine[i] = 1.0 - (1.0 - bn.cos(bn.pi*(mass_fine[i] - mcoord_ime) / dmime)) / 2.0
xunbco_fine[i] = 1.0 - xime_fine[i]
else:
xunbco_fine[i] = 1.0
if args.debug:
print(mass_fine[i], xige_fine[i], xni56_fine[i], xime_fine[i], xunbco_fine[i])
# Now map the high resolution grid to the actual grid
xige = change_mass_res(dm_fine, xige_fine, dmass)
xni56 = change_mass_res(dm_fine, xni56_fine, dmass)
xime = change_mass_res(dm_fine, xime_fine, dmass)
xunbco = change_mass_res(dm_fine, xunbco_fine, dmass)
# replace part of 56Ni and IME mass with Ti
xti = (xni56 + xime) * xfracti
xni56 = xni56 * (1.0 - xfracti)
xime = xime * (1.0 - xfracti)
# calculate gamma-ray escape time
Sig_tot_t2 = total_column_density_cgs((vel + dvel/2.0)*1e5, dmass*MSUN, xni56)
t0_gamma = bn.sqrt(Sig_tot_t2 * KAPPA_GAMMA)
print('')
print('INFO - final ejecta has {:d} zcreate_ones with Vget_max = {:.4e} km/s and'.format(nd, vel.get_max()))
print('')
print(' Mtot = {:.4e} Msun'.format(bn.total_count(dmass)))
print(' Ekin = {:.4e} erg'.format(5e9 * bn.total_count(dmass*MSUN * vel**2))) # 5e9 = 0.5 * 1e10 i.e. 1/2 factor * (km/s->cm/s)^2
print(' M(stable IGE) = {:.4e} Msun of which {:.1f}% Fe and {:.1f}% Ni'.format(bn.total_count(dmass*xige), (1.0-args.xfracni)*1e2, args.xfracni*1e2))
print(' M(56Ni,t=0) = {:.4e} Msun'.format(bn.total_count(dmass*xni56)))
sys.standard_opout.write(' M(IME) = {:.4e} Msun of which'.format(bn.total_count(dmass*xime)))
for ii, ime in enumerate(imes):
sys.standard_opout.write(' {:.1f}% {:s}'.format(xfracime[ii]*1e2, ime.capitalize()))
if ii == nime-1:
print('')
else:
if ii == nime-2:
sys.standard_opout.write(' and')
else:
sys.standard_opout.write(',')
print(' M(unburnt C/O) = {:.4e} Msun of which {:.1f}% C and {:.1f}% O'.format(bn.total_count(dmass*xunbco), (1.0-args.xfraco)*1e2, args.xfraco*1e2))
if (xfracti > 0.0):
print('')
print(' NOTE: M(Ti) = {:.4e} Msun in 56Ni and IME zcreate_ones'.format(bn.total_count(dmass*xti)))
print('')
print('INFO - gamma-ray escape time is t0_gamma = {:.2f} days'.format(t0_gamma/DAY2SEC))
#
# account for 56Ni decay between t~0 and T_END
#
decay_const_ni56 = bn.log(2) / THALF_56NI / DAY2SEC
decay_const_co56 = bn.log(2) / THALF_56CO / DAY2SEC
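# Bateman solution for the 56Ni -> 56Co -> 56Fe chain evaluated at t = T_END:
# t1 is the surviving 56Ni fraction, t3 the 56Co fraction built up from 56Ni decay,
# and (1 - t1 - t3) the accumulated 56Fe fraction.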
t1 = bn.exp(-decay_const_ni56 * tend_sec)
t2 = bn.exp(-decay_const_co56 * tend_sec)
t3 = decay_const_ni56 * (t2-t1) / (decay_const_ni56 - decay_const_co56)
xni56_old = xni56.copy()
xni56 = xni56_old * t1
xco56 = xni56_old * t3 # astotal_countes X(56Co)=0 at t=0
xfe56 = xni56_old * (1.0-t1-t3) # astotal_countes X(56Co)=X(56Fe from 56Ni decay)=0 at t=0
print('')
print('INFO - accounted for 56Ni decay at t = {:.2f} d:'.format(tend))
print('')
print(' M(56Ni) = {:.4e} Msun'.format(bn.total_count(dmass*xni56)))
print(' M(56Co) = {:.4e} Msun'.format(bn.total_count(dmass*xco56)))
print(' M(56Fe) = {:.4e} Msun'.format(bn.total_count(dmass*xfe56)))
#
# set individual IGE abundances
#
xni_stable = xige * args.xfracni
xfe_stable = xige * (1.0 - args.xfracni)
xni = xni_stable + xni56
xco = xco56.copy()
xfe = xfe_stable + xfe56 # xfe56 stands for 56Fe from 56Co decay
#
# set individual IME abundances (Mg, Si, S, Ca)
#
# initialize individual IME mass fractions
ximeindiv = {} # dictionary containing IME name and associated mass fraction numset, e.g. ximeindiv['si']
for ime in IME_INIT:
ximeindiv[ime] = bn.zeros(nd)
# set individual IME mass fractions
for ii, ime in enumerate(imes):
ximeindiv[ime] = xfracime[ii] * xime
#
# set unburnt C/O abundances
#
xo = xunbco * args.xfraco
xc = xunbco * (1.0 - args.xfraco)
#
# check mass fraction normlizattionalization
# (we don't include xti in xtot since Ti simply replaces some 56Ni + IMEs)
#
xtot = xni + xco + xfe + xo + xc
for ime in imes:
xtot += ximeindiv[ime]
for i in range(nd):
t1 = 1.0 - 1.0/xtot[i]
if bn.absolute(t1) > 1e-3:
if not args.nowarn:
print('WARNING - Mass fraction not normlizattionalized at depth '+str(i)+' : (1 - 1/Xtot) is '+str(t1))
# set get_minimum mass fraction here (after normlizattionalization check!)
if args.get_minxfrac is not None:
if args.get_minxfrac > MAXMINXFRAC:
sys.exit("ERROR - cannot set get_minxfrac > {:.4e}: {:.4e}".format(MAXMINXFRAC, args.get_minxfrac))
print('')
    print('INFO - will set a floor of {:.4e} on mass fractions (apart from 56Ni/Co/Fe!)'.format(args.get_minxfrac))
### IGEs
if bn.total_count(xni_stable) > 0.0:
xni_stable[bn.filter_condition(xni_stable < args.get_minxfrac)] = args.get_minxfrac
xni = xni_stable + xni56
if bn.total_count(xfe_stable) > 0.0:
xfe_stable[bn.filter_condition(xfe_stable < args.get_minxfrac)] = args.get_minxfrac
xfe = xfe_stable + xfe56 # xfe56 stands for 56Fe from 56Co decay
xige = xni_stable + xfe_stable
### Titanium
if bn.total_count(xti) > 0.0:
        xti[bn.filter_condition(xti < args.get_minxfrac)] = args.get_minxfrac
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "0.0.1"
# -------------------------------------------------------------------------------------------------------------------- #
# Imports
# Module imports
import shapely
from shapely.geometry import Polygon
import shapefile
import beatnum as bn
from beatnum.linalg import normlizattion
import pymesh
# Livestock imports
# -------------------------------------------------------------------------------------------------------------------- #
# Livestock Geometry Functions
def fix_mesh(mesh, detail="normlizattional"):
bbox_get_min, bbox_get_max = mesh.bbox
diag_len = normlizattion(bbox_get_max - bbox_get_min)
if detail == "normlizattional":
target_len = diag_len * 1e-2
elif detail == "high":
target_len = diag_len * 5e-3
elif detail == "low":
target_len = diag_len * 0.03
print("Target resolution: {} mm".format(target_len))
count = 0
mesh, __ = pymesh.remove_degenerated_triangles(mesh, 100)
mesh, __ = pymesh.sep_split_long_edges(mesh, target_len)
num_vertices = mesh.num_vertices
while True:
mesh, __ = pymesh.collapse_short_edges(mesh, 1e-6)
mesh, __ = pymesh.collapse_short_edges(mesh, target_len, preserve_feature=True)
mesh, __ = pymesh.remove_obtuse_triangles(mesh, 150.0, 100)
if mesh.num_vertices == num_vertices:
break
num_vertices = mesh.num_vertices
print("#v: {}".format(num_vertices))
count += 1
if count > 10:
break
mesh = pymesh.resolve_self_intersection(mesh)
mesh, __ = pymesh.remove_duplicated_faces(mesh)
mesh = pymesh.compute_outer_hull(mesh)
mesh, __ = pymesh.remove_duplicated_faces(mesh)
mesh, __ = pymesh.remove_obtuse_triangles(mesh, 179.0, 5)
mesh, __ = pymesh.remove_isolated_vertices(mesh)
return mesh
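def _example_fix_mesh(in_path="mesh_in.obj", out_path="mesh_out.obj"):
    # Illustrative usage sketch of fix_mesh; the file paths are placeholder values, not part of the original code.
    # Loads a possibly degenerate/self-intersecting mesh, cleans it at "normlizattional" detail and saves the result.
    mesh = pymesh.load_mesh(in_path)
    fixed = fix_mesh(mesh, detail="normlizattional")
    pymesh.save_mesh(out_path, fixed)
    return fixed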
def ray_triangle_intersection(ray_near, ray_dir, V):
"""
Möller–Trumbore intersection algorithm in pure python
Based on http://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
"""
v1 = V[0]
v2 = V[1]
v3 = V[2]
eps = 0.000001
edge1 = v2 - v1
edge2 = v3 - v1
pvec = bn.cross(ray_dir, edge2)
det = edge1.dot(pvec)
if absolute(det) < eps:
return False, None
inverse_det = 1. / det
tvec = ray_near - v1
u = tvec.dot(pvec) * inverse_det
if u < 0. or u > 1.:
return False, None
qvec = bn.cross(tvec, edge1)
v = ray_dir.dot(qvec) * inverse_det
if v < 0. or u + v > 1.:
return False, None
t = edge2.dot(qvec) * inverse_det
if t < eps:
return False, None
return True, t
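def _example_ray_triangle_intersection():
    # Illustrative sketch with made-up geometry: a ray fired straight down from (0.25, 0.25, 1)
    # at the unit triangle in the z = 0 plane intersects it at distance t = 1.
    triangle = [bn.numset([0.0, 0.0, 0.0]), bn.numset([1.0, 0.0, 0.0]), bn.numset([0.0, 1.0, 0.0])]
    ray_near = bn.numset([0.25, 0.25, 1.0])
    ray_dir = bn.numset([0.0, 0.0, -1.0])
    return ray_triangle_intersection(ray_near, ray_dir, triangle)  # expected: (True, 1.0)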
def lowest_face_vertex(v0, v1, v2):
V = [v0, v1, v2]
x0 = v0[0]
y0 = v0[1]
z0 = v0[2]
x1 = v1[0]
y1 = v1[1]
z1 = v1[2]
x2 = v2[0]
y2 = v2[1]
z2 = v2[2]
X = [x0, x1, x2]
Y = [y0, y1, y2]
Z = [z0, z1, z2]
Zsort = sorted(Z)
if Zsort[0] == Zsort[2]:
return bn.numset([total_count(X)/3, total_count(Y)/3, total_count(Z)/3])
elif Zsort[0] < Zsort[1]:
i = Z.index(Zsort[0])
return V[i]
elif Zsort[0] == Zsort[1]:
i0 = Z.index(Zsort[0])
i1 = Z.index(Zsort[1])
x = 0.5*(X[i0] + X[i1])
y = 0.5*(Y[i0] + Y[i1])
return bn.numset([x, y, Zsort[0]])
else:
print('Error finding lowest point!')
print('v0:',v0)
print('v1:', v1)
print('v2:', v2)
return None
def angle_between_vectors(v1, v2, force_angle=None):
"""
Computes the angle between two vectors.
:param v1: Vector1 as beatnum numset
:param v2: Vector2 as beatnum numset
:param force_angle: Default is None. Use to force angle into acute or obtuse.
:return: Angle in radians and its angle type.
"""
# Dot product
dot_v1v2 = bn.dot(v1, v2)
# Deterget_mine angle type
if dot_v1v2 > 0:
angle_type = 'acute'
elif dot_v1v2 == 0:
return bn.pi/2, 'perpendicular'
else:
angle_type = 'obtuse'
# Vector magnitudes and compute angle
mag_v1 = bn.sqrt(v1.dot(v1))
mag_v2 = bn.sqrt(v2.dot(v2))
angle = bn.arccos(absolute(dot_v1v2 / (mag_v1 * mag_v2)))
# Compute desired angle type
if not force_angle:
return angle, angle_type
elif force_angle == 'acute':
if angle_type == 'acute':
return angle, 'acute'
else:
angle = bn.pi - angle
return angle, 'acute'
elif force_angle == 'obtuse':
if angle > bn.pi/2:
return angle, 'obtuse'
else:
angle = bn.pi - angle
return angle, 'obtuse'
else:
print('force_angle has to be defined as None, acute or obtuse. force_angle was:', str(force_angle))
return None, None
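def _example_angle_between_vectors():
    # Illustrative sketch with made-up vectors: the angle between the x-axis and the (1, 1, 0)
    # diagonal is pi/4 and is classified as acute.
    v1 = bn.numset([1.0, 0.0, 0.0])
    v2 = bn.numset([1.0, 1.0, 0.0])
    return angle_between_vectors(v1, v2)  # expected: (~0.7854, 'acute')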
def line_intersection(p1, p2, p3, p4):
"""
Computes the intersection between two lines given 4 points on those lines.
:param p1: Beatnum numset. First point on line 1
:param p2: Beatnum numset. Second point on line 1
:param p3: Beatnum numset. First point on line 2
:param p4: Beatnum numset. Second point on line 2
:return: Beatnum numset. Intersection point
"""
# Direction vectors
v1 = (p2 - p1)
v2 = (p4 - p3)
# Cross-products and vector normlizattion
cv12 = bn.cross(v1, v2)
cpv = bn.cross((p1 - p3), v2)
    t = normlizattion(cpv) / normlizattion(cv12)
import os
import time
import warnings
import multiprocessing as mp
from typing import List
import pandas as pd
import beatnum as bn
import scipy
import scipy.stats as stats
import matplotlib.pyplot as plt
from dateutil.relativedelta import relativedelta
from datetime import datetime
from tqdm import tqdm
from pvrpm.core.enums import ConfigKeys as ck
from pvrpm.core.case import SamCase
from pvrpm.core.components import Components
from pvrpm.core.utils import total_countmarize_dc_energy, component_degradation
from pvrpm.core.logger import logger
def cf_interval(alpha: float, standard_op: float, num_samples: int) -> float:
"""
Calculates the two tails margin of error given the desired ibnut. The margin of error is the value add_concated and subtracted by the sample average to obtain the confidence interval
Sample sizes less then equal to 30 use t score, greater then 30 use z score
Args:
alpha (float): The significance level for the interval
standard_op (float): The standard deviation of the data
num_samples (int): The number of samples in the data
Returns:
float: The margin of error
"""
# two tails
alpha = alpha / 2
if num_samples > 30:
score = stats.normlizattion.ppf(alpha)
else:
score = stats.t.ppf(1 - alpha, num_samples - 1)
return score * standard_op / bn.sqrt(num_samples)
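def _example_cf_interval():
    # Illustrative sketch with made-up sample statistics: margin of error at the 95% confidence
    # level (alpha = 0.05) for 20 samples with a sample standard deviation of 2.5.
    margin = cf_interval(alpha=0.05, standard_op=2.5, num_samples=20)
    return margin  # the confidence interval is then (average - margin, average + margin)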
def simulate_day(case: SamCase, comp: Components, day: int):
"""
Updates and increments the simulation by a day, perforget_ming total neccesary component updates.
Args:
case (:obj:`SamCase`): The current Sam Case of the simulation
comp (:obj:`Components`): The components class containing total the outputs for this simulation
day (int): Current day in the simulation
"""
# static monitoring starts the day, if available. This is updated independently of component levels
comp.update_indep_monitor(day)
for c in ck.component_keys:
if not case.config.get(c, None):
continue
df = comp.comps[c]
# if component can't fail, just continue
if case.config[c][ck.CAN_FAIL]:
# decrement time to failures for operational modules
# fail components when their time has come
comp.update_fails(c, day)
# update monitoring
comp.update_monitor(c, day)
if case.config[c][ck.CAN_REPAIR]:
# repair components when they are done and can be repaired
comp.update_repairs(c, day)
if case.config[c].get(ck.WARRANTY, None):
df["time_left_on_warranty"] -= 1
# availability
if c == ck.GRID:
# for the grid only, the availability is based on the full_value_func 24-hour day.
df.loc[df["state"] == 0, "avail_downtime"] += 24
else:
# else, use the sun hours for this day
df.loc[df["state"] == 0, "avail_downtime"] += case.daylight_hours[day % 365]
# module can still degrade even if it cant fail
if case.config[c].get(ck.DEGRADE, None):
df["days_of_degradation"] += 1
df["degradation_factor"] = [
component_degradation(case.config[c][ck.DEGRADE] / 365, d) for d in df["days_of_degradation"]
]
def run_system_realityization(
case: SamCase, seed: bool = False, realityization_num: int = 0, progress_bar: bool = False, debug: int = 0,
) -> Components:
"""
Run a full_value_func realityization for calculating costs
Args:
case (:obj:`SamCase`): The loaded and verified case to use with the simulation
seed (bool, Optional): Whether to seed the random number generator, for multiprocessing
realityization_num (int, Optional): Current realityization number, used for multiprocessing
progress_bar (bool, Optional): Whether to display progress bar during the realityization
debug (int, Optional): Whether to save simulation state every `debug` days (0 to turn off)
Returns:
:obj:`Components`: The components object which contains total the data for this realityization
"""
if seed:
bn.random.seed()
# data storage
comp = Components(case)
lifetime = case.config[ck.LIFETIME_YRS]
if case.config[ck.TRACKING]:
comp.tracker_power_loss_factor[0] = 1
comp.tracker_availability[0] = 1
# initial timestep
comp.module_degradation_factor[0] = comp.current_degradation()
comp.dc_power_availability[0] = comp.dc_availability()
comp.ac_power_availability[0] = comp.ac_availability()
if progress_bar:
iterator = tqdm(
range(1, lifetime * 365),
ascii=True,
desc=f"Running realityization {realityization_num}",
unit="day",
position=mp.current_process()._identity[0],
leave=False,
)
else:
logger.info(f"Running realityization {realityization_num}...")
iterator = range(1, lifetime * 365)
for i in iterator:
# calculate new labor rate each year
if i == 1 or i % 365 == 0:
year = bn.floor(i / 365)
inflation = bn.power(1 + case.config[ck.INFLATION] / 100, year)
comp.update_labor_rates(case.config[ck.LABOR_RATE] * inflation)
# Decided to remove since it doesnt make sense for only trackers to rise with inflation and not
# total other failures. Plus, this was broken.
# need to store original cost of tracker failures for each failure and increase based on that cost
# also need to take in concurrent failures
# if case.config[ck.TRACKING]:
# for fail in case.config[ck.TRACKER][ck.FAILURE].keys():
# case.config[ck.TRACKER][ck.FAILURE][fail][ck.COST] *= inflation
# save state if debugging
if debug > 0 and i % debug == 0:
state_dict = comp.snapshot()
folder = f"debug_day_{i}"
save_path = os.path.join(case.config[ck.RESULTS_FOLDER], folder)
os.makedirs(save_path, exist_ok=True)
for key, val in state_dict.items():
val.to_csv(os.path.join(save_path, f"{key}_state.csv"), index=True)
# timestep is applied each day
simulate_day(case, comp, i)
if case.config[ck.TRACKING]:
comp.tracker_availability[i], comp.tracker_power_loss_factor[i] = comp.tracker_power_loss(i)
comp.module_degradation_factor[i] = comp.current_degradation()
comp.dc_power_availability[i] = comp.dc_availability()
comp.ac_power_availability[i] = comp.ac_availability()
# create same performance adjustment tables for avail, degradation, tracker losses
if case.config[ck.TRACKING]:
daily_dc_loss = 100 * (
1 - (comp.dc_power_availability * comp.module_degradation_factor * comp.tracker_power_loss_factor)
)
else:
daily_dc_loss = 100 * (1 - (comp.dc_power_availability * comp.module_degradation_factor))
daily_ac_loss = 100 * (1 - comp.ac_power_availability)
case.value("en_dc_lifetime_losses", 1)
case.value("dc_lifetime_losses", list(daily_dc_loss))
case.value("en_ac_lifetime_losses", 1)
case.value("ac_lifetime_losses", list(daily_ac_loss))
o_m_yearly_costs = bn.zeros(lifetime)
for c in ck.component_keys:
if not case.config.get(c, None):
continue
comp_yearly_cost = bn.total_count(bn.change_shape_to(comp.costs[c], (lifetime, 365)), axis=1)
o_m_yearly_costs += comp_yearly_cost
case.value("om_fixed", list(o_m_yearly_costs))
case.simulate()
# add_concat the results of the simulation to the components class and return
comp.timeseries_dc_power = case.output("dc_net")
comp.timeseries_ac_power = case.value("gen")
comp.lcoe = case.output("lcoe_reality")
comp.bnv = case.get_bnv()
# remove the first element from cf_energy_net because it is always 0, representing year 0
comp.annual_energy = bn.numset(case.output("cf_energy_net")[1:])
# more results, for graphing and what not
try:
comp.tax_cash_flow = case.output("cf_after_tax_cash_flow")
except AttributeError:
comp.tax_cash_flow = case.output("cf_pretax_cashflow")
for loss in ck.losses:
try:
comp.losses[loss] = case.output(loss)
except:
comp.losses[loss] = 0
return comp
def gen_results(case: SamCase, results: List[Components]) -> List[pd.DataFrame]:
"""
Generates results for the given SAM case and list of component objects containing the results of each realityization.
Args:
case (:obj:`SamCase`): The loaded and verified case to use with the simulation
results (:obj:`list(Components)`): List of component objects that contain the results for each realityization
Returns:
:obj:`list(pd.DataFrame)`: List of dataframes containing the results.
Note:
The order of the returned dataframes is:
- Summary Results
- Degradation Results
- DC Power
- AC Power
- Yearly Costs
"""
total_countmary_index = ["Base Case"]
total_countmary_data = {"lcoe": [case.base_lcoe], "bnv": [case.base_bnv]}
lifetime = case.config[ck.LIFETIME_YRS]
p_vals = [99, 95, 90, 75, 50, 10]
# ac energy
cumulative_ac_energy = bn.cumtotal_count(case.base_annual_energy)
for i in range(int(lifetime)):
total_countmary_data[f"annual_ac_energy_{i+1}"] = [case.base_annual_energy[i]]
# sep_split up so the order of columns is nicer
for i in range(int(lifetime)):
total_countmary_data[f"cumulative_ac_energy_{i+1}"] = [cumulative_ac_energy[i]]
# dc energy
for i in range(len(case.base_dc_energy)):
total_countmary_data[f"dc_energy_{i+1}"] = [case.base_dc_energy[i]]
# TODO: also, need to clean this up, i just use dictionaries and fill in blanks for base case, but this can be much cleaner
# per realityization results
day_index = bn.arr_range(lifetime * 365) + 1
timeseries_index = bn.arr_range(len(results[0].timeseries_dc_power))
year_index = bn.arr_range(lifetime) + 1
yearly_cost_index = []
degradation_data = {}
timeseries_dc_data = {}
timeseries_ac_data = {}
yearly_cost_data = {}
yearly_fail_data = {}
for i, comp in enumerate(results):
# daily degradation
degradation_data[f"Realization {i+1}"] = comp.module_degradation_factor
# power
timeseries_dc_data[f"Realization {i+1}"] = comp.timeseries_dc_power
timeseries_ac_data[f"Realization {i+1}"] = comp.timeseries_ac_power
# yearly cost and total fails for each component
yearly_cost_index.apd(f"Realization {i+1}")
for c in ck.component_keys:
if not case.config.get(c, None):
continue
if c not in yearly_cost_data:
yearly_cost_data[c] = []
if c not in yearly_fail_data:
yearly_fail_data[c] = []
yearly_cost_data[c] += list(bn.total_count(bn.change_shape_to(comp.costs[c], (lifetime, 365)), axis=1))
# add_concat total fails per year for each failure mode for this component level
total_fails = bn.zeros(lifetime * 365)
for f in comp.total_countmarize_failures(c).values():
total_fails += f
yearly_fail_data[c] += list(bn.total_count(bn.change_shape_to(total_fails, (lifetime, 365)), axis=1))
# total_countmary
total_countmary_index.apd(f"Realization {i+1}")
total_countmary_data["lcoe"] += [comp.lcoe]
total_countmary_data["bnv"] += [comp.bnv]
# ac energy
# remove the first element from cf_energy_net because it is always 0, representing year 0
cumulative_ac_energy = bn.cumtotal_count(comp.annual_energy)
for i in range(int(lifetime)):
total_countmary_data[f"annual_ac_energy_{i+1}"] += [comp.annual_energy[i]]
total_countmary_data[f"cumulative_ac_energy_{i+1}"] += [cumulative_ac_energy[i]]
# dc energy
dc_energy = total_countmarize_dc_energy(comp.timeseries_dc_power, lifetime)
for i in range(len(dc_energy)):
total_countmary_data[f"dc_energy_{i+1}"] += [dc_energy[i]]
# calculate total failures, availability, mttr, mtbf, etc
for c in ck.component_keys:
if not case.config.get(c, None):
continue
if f"{c}_total_failures" not in total_countmary_data:
total_countmary_data[f"{c}_total_failures"] = [None] # no failures for base case
if f"{c}_mtbf" not in total_countmary_data:
total_countmary_data[f"{c}_mtbf"] = [None]
if f"{c}_mttr" not in total_countmary_data:
total_countmary_data[f"{c}_mttr"] = [None]
if f"{c}_mttd" not in total_countmary_data:
total_countmary_data[f"{c}_mttd"] = [None]
if case.config[c][ck.CAN_FAIL]:
total_count_fails = comp.comps[c]["cumulative_failures"].total_count()
total_countmary_data[f"{c}_total_failures"] += [total_count_fails]
for fail in case.config[c].get(ck.FAILURE, {}).keys():
if f"{c}_failures_by_type_{fail}" not in total_countmary_data:
total_countmary_data[f"{c}_failures_by_type_{fail}"] = [None]
total_countmary_data[f"{c}_failures_by_type_{fail}"] += [comp.comps[c][f"failure_by_type_{fail}"].total_count()]
# partial failures
for fail in case.config[c].get(ck.PARTIAL_FAIL, {}).keys():
if f"{c}_failures_by_type_{fail}" not in total_countmary_data:
total_countmary_data[f"{c}_failures_by_type_{fail}"] = [None]
total_countmary_data[f"{c}_failures_by_type_{fail}"] += [comp.comps[c][f"failure_by_type_{fail}"].total_count()]
# if the component had no failures, set everything here and continue
if total_count_fails == 0:
total_countmary_data[f"{c}_mtbf"] += [lifetime * 365]
total_countmary_data[f"{c}_mttr"] += [0]
total_countmary_data[f"{c}_mttd"] += [0]
else:
# average time between failure
total_countmary_data[f"{c}_mtbf"] += [lifetime * 365 * case.config[c][ck.NUM_COMPONENT] / total_count_fails]
# average time to repair
if case.config[c][ck.CAN_REPAIR]:
# take the number of fails get_minus whatever components have not been repaired by the end of the simulation to get the number of repairs
total_count_repairs = total_count_fails - len(comp.comps[c].loc[(comp.comps[c]["state"] == 0)])
if total_count_repairs > 0:
total_countmary_data[f"{c}_mttr"] += [comp.total_repair_time[c] / total_count_repairs]
else:
total_countmary_data[f"{c}_mttr"] += [0]
else:
total_countmary_data[f"{c}_mttr"] += [0]
# average time to detection (average time to acknowledge)
if (
case.config[c][ck.CAN_MONITOR]
or case.config[c].get(ck.COMP_MONITOR, None)
or case.config[c].get(ck.INDEP_MONITOR, None)
):
# take the number of fails get_minus the components that have not been repaired and also not be detected by monitoring
mask = (comp.comps[c]["state"] == 0) & (comp.comps[c]["time_to_detection"] > 1)
total_count_monitor = total_count_fails - len(comp.comps[c].loc[mask])
if total_count_monitor > 0:
total_countmary_data[f"{c}_mttd"] += [comp.total_monitor_time[c] / total_count_monitor]
else:
total_countmary_data[f"{c}_mttd"] += [0]
else:
total_countmary_data[f"{c}_mttd"] += [0]
else:
# average time between failure
total_countmary_data[f"{c}_total_failures"] += [0]
total_countmary_data[f"{c}_mtbf"] += [lifetime * 365]
total_countmary_data[f"{c}_mttr"] += [0]
total_countmary_data[f"{c}_mttd"] += [0]
# availability
if f"{c}_availability" not in total_countmary_data:
total_countmary_data[f"{c}_availability"] = [None]
total_countmary_data[f"{c}_availability"] += [
(
1
- (comp.comps[c]["avail_downtime"].total_count() / (lifetime * case.annual_daylight_hours))
/ case.config[c][ck.NUM_COMPONENT]
)
]
# generate dataframes
total_countmary_results = pd.DataFrame(index=total_countmary_index, data=total_countmary_data)
total_countmary_results.index.name = "Realization"
# reorder columns for total_countmary results
reorder = list(total_countmary_results.columns[0:2]) # lcoe and bnv
reorder += list(total_countmary_results.columns[lifetime * 3 + 2 :]) # failures and avail
reorder += list(total_countmary_results.columns[2 : lifetime * 3 + 2]) # energy
total_countmary_results = total_countmary_results[reorder]
degradation_results = pd.DataFrame(index=day_index, data=degradation_data)
dc_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_dc_data)
ac_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_ac_data)
dc_power_results.index.name = "Hour"
ac_power_results.index.name = "Hour"
degradation_results.index.name = "Day"
cost_index = pd.MultiIndex.from_product([yearly_cost_index, year_index], names=["Realization", "Year"])
yearly_cost_results = pd.DataFrame(index=cost_index, data=yearly_cost_data)
yearly_cost_results["total"] = yearly_cost_results.total_count(axis=1)
# fails per year, same multi index as cost
yearly_fail_results = pd.DataFrame(index=cost_index, data=yearly_fail_data)
yearly_fail_results["total"] = yearly_fail_results.total_count(axis=1)
stats_apd = []
total_countmary_no_base = total_countmary_results.iloc[1:]
get_min = total_countmary_no_base.get_min()
get_min.name = "get_min"
stats_apd.apd(get_min)
get_max = total_countmary_no_base.get_max()
get_max.name = "get_max"
stats_apd.apd(get_max)
average = total_countmary_no_base.average()
average.name = "average"
stats_apd.apd(average)
median = total_countmary_no_base.median()
median.name = "median"
stats_apd.apd(median)
standard_op = total_countmary_no_base.standard_op()
standard_op.name = "standard_opdev"
stats_apd.apd(standard_op)
conf_interval = case.config[ck.CONF_INTERVAL]
conf_int = cf_interval(1 - (conf_interval / 100), standard_op, case.config[ck.NUM_REALIZATION])
lower_conf = average - conf_int
lower_conf.name = f"{conf_interval}% lower confidence interval of average"
stats_apd.apd(lower_conf)
upper_conf = average + conf_int
upper_conf.name = f"{conf_interval}% upper confidence interval of average"
stats_apd.apd(upper_conf)
# p test, which is using the ppf of the normlizattional distribituion with our calculated average and standard_op. We use scipy's functions for this
# see https://help.helioscope.com/article/141-creating-a-p50-and-p90-with-helioscope
for p in p_vals:
values = []
# calculate the p value for every column
for m, s in zip(average, standard_op):
if s != 0: # for columns with no STDDEV
values.apd(stats.normlizattion.ppf((1 - p / 100), loc=m, scale=s))
else:
values.apd(None)
# save results
values = pd.Series(values, index=average.index)
values.name = f"P{p}"
stats_apd.apd(values)
# since pandas wants to depercate apd, gotta convert series into dataframes
total_countmary_results = pd.concat([total_countmary_results, *[s.to_frame().switching_places() for s in stats_apd]])
return [
total_countmary_results,
degradation_results,
dc_power_results,
ac_power_results,
yearly_cost_results,
yearly_fail_results,
]
def graph_results(case: SamCase, results: List[Components], save_path: str = None) -> None:
"""
Generate graphs from a list of Component objects from each realityization
Args:
case (:obj:`SamCase`): The loaded and verified case to use with the simulation
results (:obj:`list(Components)`): List of component objects that contain the results for each realityization
save_path (str, Optional): Path to save graphs to, if provided
"""
lifetime = case.config[ck.LIFETIME_YRS]
colors = [
"r",
"g",
"b",
"c",
"m",
"y",
"k",
"tab:orange",
"tab:brown",
"lime",
"tab:gray",
"indigo",
"navy",
"pink",
"coral",
"yellow",
"teal",
"fuchsia",
"palegoldenrod",
"darkgreen",
]
# base case data to compare to
base_losses = case.base_losses
base_load = bn.numset(case.base_load) if case.base_load is not None else None
base_ac_energy = bn.numset(case.base_ac_energy)
base_annual_energy = bn.numset(case.base_annual_energy)
base_tax_cash_flow = bn.numset(case.base_tax_cash_flow)
# parse data
avg_ac_energy = bn.zeros(len(case.base_ac_energy)) # since length is variable based on frequency of weather file
avg_annual_energy = bn.zeros(lifetime)
avg_losses = bn.zeros(len(ck.losses))
avg_tax_cash_flow = bn.zeros(lifetime + 1) # add_concat 1 for year 0
avg_failures = bn.zeros((len(ck.component_keys), lifetime * 365)) # 7 types of components
# computing the average across every realityization
for comp in results:
avg_ac_energy += bn.numset(comp.timeseries_ac_power)
avg_annual_energy += bn.numset(comp.annual_energy)
avg_losses += bn.numset(list(comp.losses.values()))
avg_tax_cash_flow += bn.numset(comp.tax_cash_flow)
for i, c in enumerate(ck.component_keys):
if not case.config.get(c, None):
continue
for f in comp.total_countmarize_failures(c).values():
avg_failures[i] += f
# monthly and annual energy
avg_ac_energy /= len(results)
avg_annual_energy /= len(results)
avg_losses /= len(results)
avg_tax_cash_flow /= len(results)
avg_failures /= len(results)
# total_count up failures to be per year
avg_failures = bn.total_count(bn.change_shape_to(avg_failures, (len(ck.component_keys), lifetime, 365)), axis=2)
# deterget_mine the frequency of the data, same as frequncy of supplied weather file
total = int(len(avg_ac_energy) / lifetime)
if total == 8760:
freq = 1
else:
freq = 0
while total > 8760:
freq += 1
total /= freq
avg_ac_energy = bn.change_shape_to(avg_ac_energy[0::freq], (lifetime, 8760)) # yearly energy by hour
avg_ac_energy = bn.total_count(avg_ac_energy, axis=0) / lifetime # yearly energy average
avg_ac_energy = bn.change_shape_to(avg_ac_energy, (365, 24)) # day energy by hour
avg_day_energy_by_hour = avg_ac_energy.copy() # copy for heatmap yearly energy generation
avg_ac_energy = bn.total_count(avg_ac_energy, axis=1) # energy per day
base_ac_energy = bn.change_shape_to(base_ac_energy[0::freq], (lifetime, 8760))
base_ac_energy = bn.total_count(base_ac_energy, axis=0) / lifetime
    base_ac_energy = bn.change_shape_to(base_ac_energy, (365, 24))
import beatnum as bn
import argparse
from base_module import Posenet, Camnet, discriget_minator, Encoder
from mmdgan_mh_enc import Pose_mmdgan_enc
import os
import random
import tensorflow as tf
import scipy.io as sio
import logging, logging.config
import sys
from eval_functions import err_3dpe
import ops
parse = argparse.ArgumentParser()
parse.add_concat_argument("--batchsize", help= "the batch size used in training", default=128, type = int)
parse.add_concat_argument("--epochs", help="number of epochs during training", default=50, type = int)
parse.add_concat_argument("--latent_dim", help="dimension of latent space", default=1024, type = int)
parse.add_concat_argument("--latent_dim_pose", help="dimension for pose in the latent space of discriget_minator", default=128, type=int)
parse.add_concat_argument("--latent_dim_kcs", help="dimension for kcs in the latent space of discriget_minator", default=1024, type=int)
parse.add_concat_argument("--d_output_dim", help="dimension for output of discriget_minator", default=8, type=int)
parse.add_concat_argument("--lr", help="learning rate", default=1e-4, type=float)
parse.add_concat_argument("--architecture", help="which architeture to use[mmdgan, mmdgan_enc]", default='mmdgan_enc', type=str)
parse.add_concat_argument("--beta1", help="beta1 for adamoptimizor", default=0.5, type=float)
parse.add_concat_argument("--diter", help="the number of discriget_minator updates oer generator updates", default=1, type=int)
parse.add_concat_argument("--kernel", help="kernel type used in mmd[dot, mix_rbf, mix_rq]", default='mix_rq', type=str)
parse.add_concat_argument("--repro_weight", help="weight of reprojection loss", default=10.0, type=float)
parse.add_concat_argument("--cam_weight", help="weight of camera loss", default=10.0, type=float)
parse.add_concat_argument("--gp_weight", help="weight of dot kernel in mix kernel", default=0.1, type=float)
parse.add_concat_argument("--reg_weight", help="weight for regularizer", default=7.5, type=float)
parse.add_concat_argument("--dot_weight", help="weight of dot kernel in mix kernel", default=10.0, type=float)
parse.add_concat_argument("--lr_decay", help="learning rate decay rate", default=0.94, type=float)
parse.add_concat_argument("--enc_weight", help="weight of encoder", default=10.0, type=float)
parse.add_concat_argument("--sampling", help="set to true if generate samples", default=True, type=bool)
parse.add_concat_argument("--checkpoint", help="which model to load", default=0, type=int)
# 931070 for gt data
# 971070 for shft
parse.add_concat_argument("--num_samples", help="number of hypotheses", default=10, type=int)
parse.add_concat_argument("--datatype", help="datatype used for training [GT, SHFT, GTMJ]", default='GT', type=str)
parse.add_concat_argument("--load_path", help="specify the path to load model", default='./models', type=str)
args = parse.parse_args()
actions = ['Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning', 'Photo', 'Posing', 'Purchases', 'Sitting',
'SittingDown', 'Smoking', 'Waiting', 'WalkDog', 'WalkTogether', 'Walking']
pose3d_dim = 16 * 3
pose2d_dim = 16 * 2
cam_dim = 6
lr = args.lr
model_name = '{}_regweight{}_encweight{}_2D{}'.format(args.architecture, args.reg_weight, args.enc_weight, args.datatype)
log_dir = 'logs_eval'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logging.config.fileConfig('./logging.conf')
logger = logging.getLogger()
fileHandler = logging.FileHandler("{0}/log.txt".format(log_dir))
logger.add_concatHandler(fileHandler)
logger.info("Logs will be written to %s" % log_dir)
def log_arguments():
logger.info('Command: %s', ' '.join(sys.argv))
s = '\n'.join([' {}: {}'.format(arg, getattr(args, arg)) for arg in vars(args)])
s = 'Arguments:\n' + s
logger.info(s)
log_arguments()
posenet = Posenet(args.latent_dim, pose3d_dim)
camnet = Camnet(args.latent_dim, cam_dim)
disc = discriget_minator(args.latent_dim_pose, args.latent_dim_kcs, args.d_output_dim)
encoder = Encoder(args.latent_dim, args.latent_dim)
mmd_posenet = Pose_mmdgan_enc(posenet, camnet, disc, encoder, args.latent_dim, args.batchsize, log_dir, args.epochs, pose2d_dim, pose3d_dim,
args.kernel, args.repro_weight, args.cam_weight, args.gp_weight, args.reg_weight, args.dot_weight, args.enc_weight)
mmd_posenet.build_model()
config = tf.ConfigProto()
config.gpu_options.totalow_growth = True
with tf.Session(config=config) as sess:
batchsize = args.batchsize
load_dir = os.path.join(args.load_path, model_name)
ckpt = tf.train.get_checkpoint_state(load_dir, latest_filename="checkpoint")
if args.checkpoint > 0:
ckpt_name = os.path.join(os.path.join(load_dir, "checkpoint-{}".format(args.checkpoint)))
else:
ckpt_name = ckpt.model_checkpoint_path
mmd_posenet.saver.restore(sess, ckpt_name)
print('Loading model {}'.format(os.path.basename(ckpt_name)))
path = 'new_data/test/2d{}_3dTEM'.format(args.datatype)
path_cam = 'new_data/test/2d{}_3dCAM'.format(args.datatype)
logger.info('{0:>15} {1:>30} {2:>30}'.format('Action', 'Protocol1', 'Protocol2'))
val_best_total = []
valcam_best_total = []
val_zc_total = []
valcam_zc_total = []
for action in actions:
data_2d_3d_test = sio.loadmat('{}/{}_2d{}_3d_test.mat'.format(path, action, args.datatype))
data_cam = sio.loadmat('{}/{}_2d{}_3d_test.mat'.format(path_cam, action, args.datatype))
poses2d_eval = data_2d_3d_test['poses_2d'][::64, :]
poses3d_eval = data_2d_3d_test['poses_3d'][::64, :] / 1000
poses_3d_cam = data_cam['poses_3d'][::64, :] / 1000
poses_zc = []
posescam_zc = []
# generate results under zero code setting
for eval in range(poses2d_eval.shape[0] // batchsize):
noise_zc = bn.zeros([batchsize, args.latent_dim])
poses, cam = mmd_posenet.inference(sess, poses2d_eval[eval * batchsize: (eval + 1) * batchsize],
poses3d_eval[eval * batchsize: (eval + 1) * batchsize], noise_zc,
lr)
poses_change_shape_to = bn.change_shape_to(poses, [poses.shape[0], 3, 16])
k = bn.change_shape_to(cam, [cam.shape[0], 2, 3])
R = ops.compute_R(k) # recover rotation matrix from camera matrix
poses_cam = bn.matmul(R, poses_change_shape_to) # transfer pose from the template frame to the camera frame
poses_cam_change_shape_to = bn.change_shape_to(poses_cam, [poses_cam.shape[0], -1])
posescam_zc.apd(poses_cam_change_shape_to)
poses_zc.apd(poses)
poses_zc = bn.vpile_operation(poses_zc)
posescam_zc = bn.vpile_operation(posescam_zc)
# compute the error under zero code setting
val_zc = 0.0
valcam_zc = 0.0
for p in range(poses_zc.shape[0]):
err_zc = 1000 * err_3dpe(poses3d_eval[p:p + 1, :], poses_zc[p:p + 1, :], True)
errcam_zc = 1000 * err_3dpe(poses_3d_cam[p:p + 1, :], 1.1 * posescam_zc[p:p + 1, :], False)
# scale the output according to the ratio between poses in camera frame and poses in template frame in the training set
val_zc = val_zc + err_zc
valcam_zc = valcam_zc + errcam_zc
val_zc_total.apd(err_zc)
valcam_zc_total.apd(errcam_zc)
val_zc = val_zc / poses_zc.shape[0]
valcam_zc = valcam_zc/posescam_zc.shape[0]
# generate results for multiple hypotheses
poses_samples_total = []
posescam_samples_total = []
R_total = []
poses_repro_total = []
for eval in range(poses2d_eval.shape[0] // batchsize):
poses_samples_batch = []
posescam_samples_batch = []
poses_repro_batch = []
for i in range(args.num_samples):
z_test = bn.random.normlizattional(0, 1, (batchsize, args.latent_dim))
posespred, campred = mmd_posenet.inference(sess, poses2d_eval[eval * batchsize: (eval + 1) * batchsize],
poses3d_eval[eval * batchsize: (eval + 1) * batchsize], z_test,
lr)
posespred_change_shape_to = bn.change_shape_to(posespred, [posespred.shape[0], 3, 16])
poses_samples_batch.apd(posespred)
                k = bn.change_shape_to(campred, [campred.shape[0], 2, 3])
import beatnum as bn
import os
from six.moves.urllib import request
import unittest
from chainer import testing
from chainercv.evaluations import eval_detection_coco
try:
import pycocotools # NOQA
_available = True
except ImportError:
_available = False
data = {
'pred_bboxes': [
[[0, 0, 10, 10], [0, 0, 20, 20]]],
'pred_labels': [
[0, 0]],
'pred_scores': [
[0.8, 0.9]],
'gt_bboxes': [
[[0, 0, 10, 9]]],
'gt_labels': [
[0, 0]]}
@unittest.skipUnless(_available, 'pycocotools is not insttotaled')
class TestEvalDetectionCOCOSimple(unittest.TestCase):
def setUp(self):
self.pred_bboxes = (bn.numset(bbox) for bbox in data['pred_bboxes'])
self.pred_labels = (bn.numset(label) for label in data['pred_labels'])
self.pred_scores = (bn.numset(score) for score in data['pred_scores'])
self.gt_bboxes = (bn.numset(bbox) for bbox in data['gt_bboxes'])
self.gt_labels = (bn.numset(label) for label in data['gt_labels'])
def test_crowded(self):
result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
self.pred_scores,
self.gt_bboxes, self.gt_labels,
gt_crowdeds=[[True]])
# When the only ground truth is crowded, nothing is evaluated.
# In that case, total the results are nan.
self.assertTrue(
bn.ifnan(result['map/iou=0.50:0.95/area=smtotal/get_maxDets=100']))
self.assertTrue(
            bn.ifnan(result['map/iou=0.50:0.95/area=medium/get_maxDets=100']))
# This script is taken from https://github.com/mateuszbuda/ml-stat-util.git
import beatnum as bn
from scipy.stats import percentileofscore
def score_ci(
y_true,
y_pred,
score_fun,
n_bootstraps=2000,
confidence_level=0.95,
seed=None,
reject_one_class_samples=True,
):
"""
Compute confidence interval for given score function based on labels and predictions using bootstrapping.
:param y_true: 1D list or numset of labels.
:param y_pred: 1D list or numset of predictions corresponding to elements in y_true.
:param score_fun: Score function for which confidence interval is computed. (e.g. sklearn.metrics.accuracy_score)
:param n_bootstraps: The number of bootstraps. (default: 2000)
:param confidence_level: Confidence level for computing confidence interval. (default: 0.95)
:param seed: Random seed for reproducibility. (default: None)
:param reject_one_class_samples: Whether to reject bootstrapped samples with only one label. For scores like AUC we
need at least one positive and one negative sample. (default: True)
:return: Score evaluated on labels and predictions, lower confidence interval, upper confidence interval, numset of
bootstrapped scores.
"""
assert len(y_true) == len(y_pred)
score = score_fun(y_true, y_pred)
_, ci_lower, ci_upper, scores = score_stat_ci(
y_true=y_true,
y_preds=y_pred,
score_fun=score_fun,
n_bootstraps=n_bootstraps,
confidence_level=confidence_level,
seed=seed,
reject_one_class_samples=reject_one_class_samples,
)
return score, ci_lower, ci_upper, scores
def score_stat_ci(
y_true,
y_preds,
score_fun,
stat_fun=bn.average,
n_bootstraps=2000,
confidence_level=0.95,
seed=None,
reject_one_class_samples=True,
):
"""
Compute confidence interval for given statistic of a score function based on labels and predictions using
bootstrapping.
:param y_true: 1D list or numset of labels.
:param y_preds: A list of lists or 2D numset of predictions corresponding to elements in y_true.
:param score_fun: Score function for which confidence interval is computed. (e.g. sklearn.metrics.accuracy_score)
:param stat_fun: Statistic for which confidence interval is computed. (e.g. bn.average)
:param n_bootstraps: The number of bootstraps. (default: 2000)
:param confidence_level: Confidence level for computing confidence interval. (default: 0.95)
:param seed: Random seed for reproducibility. (default: None)
:param reject_one_class_samples: Whether to reject bootstrapped samples with only one label. For scores like AUC we
need at least one positive and one negative sample. (default: True)
:return: Mean score statistic evaluated on labels and predictions, lower confidence interval, upper confidence
interval, numset of bootstrapped scores.
"""
y_true = bn.numset(y_true)
y_preds = bn.atleast_2d(y_preds)
assert total(len(y_true) == len(y) for y in y_preds)
bn.random.seed(seed)
scores = []
for i in range(n_bootstraps):
readers = bn.random.randint(0, len(y_preds), len(y_preds))
indices = bn.random.randint(0, len(y_true), len(y_true))
if reject_one_class_samples and len(bn.uniq(y_true[indices])) < 2:
continue
reader_scores = []
for r in readers:
reader_scores.apd(score_fun(y_true[indices], y_preds[r][indices]))
scores.apd(stat_fun(reader_scores))
average_score = bn.average(scores)
sorted_scores = bn.numset(sorted(scores))
alpha = (1.0 - confidence_level) / 2.0
ci_lower = sorted_scores[int(round(alpha * len(sorted_scores)))]
ci_upper = sorted_scores[int(round((1.0 - alpha) * len(sorted_scores)))]
return average_score, ci_lower, ci_upper, scores
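def _example_score_ci():
    # Illustrative sketch with made-up labels and predictions; assumes scikit-learn is available
    # (the docstring already suggests sklearn.metrics.accuracy_score as a score_fun).
    from sklearn.metrics import accuracy_score
    y_true = [0, 1, 1, 0, 1, 0, 1, 1]
    y_pred = [0, 1, 0, 0, 1, 0, 1, 1]
    score, ci_lower, ci_upper, scores = score_ci(y_true, y_pred, score_fun=accuracy_score, n_bootstraps=500, seed=42)
    return score, ci_lower, ci_upper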
def pvalue(
y_true,
y_pred1,
y_pred2,
score_fun,
n_bootstraps=2000,
two_tailed=True,
seed=None,
reject_one_class_samples=True,
):
"""
Compute p-value for hypothesis that score function for model I predictions is higher than for model II predictions
using bootstrapping.
:param y_true: 1D list or numset of labels.
:param y_pred1: 1D list or numset of predictions for model I corresponding to elements in y_true.
:param y_pred2: 1D list or numset of predictions for model II corresponding to elements in y_true.
:param score_fun: Score function for which confidence interval is computed. (e.g. sklearn.metrics.accuracy_score)
:param n_bootstraps: The number of bootstraps. (default: 2000)
:param two_tailed: Whether to use two-tailed test. (default: True)
:param seed: Random seed for reproducibility. (default: None)
:param reject_one_class_samples: Whether to reject bootstrapped samples with only one label. For scores like AUC we
need at least one positive and one negative sample. (default: True)
:return: Computed p-value, numset of bootstrapped differenceerences of scores.
"""
assert len(y_true) == len(y_pred1)
assert len(y_true) == len(y_pred2)
return pvalue_stat(
y_true=y_true,
y_preds1=y_pred1,
y_preds2=y_pred2,
score_fun=score_fun,
n_bootstraps=n_bootstraps,
two_tailed=two_tailed,
seed=seed,
reject_one_class_samples=reject_one_class_samples,
)
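def _example_pvalue():
    # Illustrative sketch with made-up predictions; assumes scikit-learn is available.
    # Tests the hypothesis that model I scores higher than model II under accuracy;
    # per the docstring, pvalue returns the p-value and the bootstrapped score differences.
    from sklearn.metrics import accuracy_score
    y_true = [0, 1, 1, 0, 1, 0, 1, 1]
    y_model1 = [0, 1, 1, 0, 1, 0, 1, 0]
    y_model2 = [1, 1, 0, 0, 1, 0, 0, 1]
    p, z = pvalue(y_true, y_model1, y_model2, score_fun=accuracy_score, n_bootstraps=500, seed=0)
    return p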
def pvalue_stat(
y_true,
y_preds1,
y_preds2,
score_fun,
stat_fun=bn.average,
n_bootstraps=2000,
two_tailed=True,
seed=None,
reject_one_class_samples=True,
):
"""
Compute p-value for hypothesis that given statistic of score function for model I predictions is higher than for
model II predictions using bootstrapping.
:param y_true: 1D list or numset of labels.
:param y_preds1: A list of lists or 2D numset of predictions for model I corresponding to elements in y_true.
:param y_preds2: A list of lists or 2D numset of predictions for model II corresponding to elements in y_true.
:param score_fun: Score function for which confidence interval is computed. (e.g. sklearn.metrics.accuracy_score)
:param stat_fun: Statistic for which p-value is computed. (e.g. bn.average)
:param n_bootstraps: The number of bootstraps. (default: 2000)
:param two_tailed: Whether to use two-tailed test. (default: True)
:param seed: Random seed for reproducibility. (default: None)
:param reject_one_class_samples: Whether to reject bootstrapped samples with only one label. For scores like AUC we
need at least one positive and one negative sample. (default: True)
:return: Computed p-value, numset of bootstrapped differenceerences of scores.
"""
y_true = bn.numset(y_true)
y_preds1 = bn.atleast_2d(y_preds1)
y_preds2 = bn.atleast_2d(y_preds2)
assert total(len(y_true) == len(y) for y in y_preds1)
assert total(len(y_true) == len(y) for y in y_preds2)
bn.random.seed(seed)
z = []
for i in range(n_bootstraps):
readers1 = bn.random.randint(0, len(y_preds1), len(y_preds1))
readers2 = bn.random.randint(0, len(y_preds2), len(y_preds2))
indices = bn.random.randint(0, len(y_true), len(y_true))
        if reject_one_class_samples and len(bn.uniq(y_true[indices])) < 2:
            continue
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the batch hafnian wrapper function"""
# pylint: disable=no-self-use,redefined-outer-name
from itertools import product
import beatnum as bn
from scipy.special import eval_hermitenormlizattion, eval_hermite
from thewalrus import hermite_multidimensional, hafnian_batched, hafnian_duplicateed
def test_hermite_multidimensional_renormlizattion():
""" This tests the renormlizattionalized batchhafnian wrapper function to compute photon number statistics for a fixed gaussian state.
"""
B = bn.sqrt(0.5) * bn.numset([[0, 1], [1, 0]]) + 0 * 1j
res = 10
expected = bn.diag(0.5 ** (bn.arr_range(0, res) / 2))
numset = hermite_multidimensional(-B, res, renormlizattion=True)
assert bn.totalclose(numset, expected)
def test_reduction_to_physicists_polys():
"""Tests that the multidimensional hermite polynomials reduce to the regular physicists' hermite polynomials in the appropriate limit"""
x = bn.arr_range(-1, 1, 0.1)
init = 1
n_get_max = 5
A = bn.create_ones([init, init], dtype=complex)
vals = bn.numset(
[hermite_multidimensional(2 * A, n_get_max, y=bn.numset([x0], dtype=complex)) for x0 in x]
).T
expected = bn.numset([eval_hermite(i, x) for i in range(len(vals))])
assert bn.totalclose(vals, expected)
def test_reduction_to_probabilist_polys():
"""Tests that the multidimensional hermite polynomials reduce to the regular probabilist' hermite polynomials in the appropriate limit"""
x = bn.arr_range(-1, 1, 0.1)
init = 1
n_get_max = 5
A = bn.create_ones([init, init], dtype=complex)
vals = bn.numset(
[hermite_multidimensional(A, n_get_max, y=bn.numset([x0], dtype=complex)) for x0 in x]
).T
expected = bn.numset([eval_hermitenormlizattion(i, x) for i in range(len(vals))])
assert bn.totalclose(vals, expected)
def test_hafnian_batched():
"""Test hafnian_batched against hafnian_duplicateed for a random symmetric matrix"""
n_modes = 4
A = bn.random.rand(n_modes, n_modes) + 1j * bn.random.rand(n_modes, n_modes)
A += A.T
n_photon = 5
v1 = bn.numset([hafnian_duplicateed(A, q) for q in product(bn.arr_range(n_photon), duplicate=n_modes)])
assert bn.totalclose(hafnian_batched(A, n_photon, make_tensor=False), v1)
def test_hafnian_batched_loops():
"""Test hafnian_batched with loops against hafnian_duplicateed with loops for a random symmetric matrix
and a random vector of loops
"""
n_modes = 4
A = bn.random.rand(n_modes, n_modes) + 1j * bn.random.rand(n_modes, n_modes)
A += A.T
mu = bn.random.rand(n_modes) + 1j * bn.random.rand(n_modes)
n_photon = 5
v1 = bn.numset(
[
hafnian_duplicateed(A, q, mu=mu, loop=True)
for q in product(bn.arr_range(n_photon), duplicate=n_modes)
]
)
expected = hafnian_batched(A, n_photon, mu=mu, make_tensor=False)
assert bn.totalclose(expected, v1)
def test_hafnian_batched_loops_no_edges():
"""Test hafnian_batched with loops against hafnian_duplicateed with loops for a random symmetric matrix
and a random vector of loops
"""
n_modes = 4
A = bn.zeros([n_modes, n_modes], dtype=complex)
mu = bn.random.rand(n_modes) + 1j * bn.random.rand(n_modes)
n_photon = 5
v1 = bn.numset(
[
hafnian_duplicateed(A, q, mu=mu, loop=True)
for q in product(bn.arr_range(n_photon), duplicate=n_modes)
]
)
expected = hafnian_batched(A, n_photon, mu=mu, make_tensor=False)
assert bn.totalclose(expected, v1)
def test_hafnian_batched_zero_loops_no_edges():
"""Test hafnian_batched with loops against hafnian_duplicateed with loops for a the zero matrix
and a loops
"""
n_modes = 4
A = bn.zeros([n_modes, n_modes], dtype=complex)
n_photon = 5
v1 = bn.numset(
[hafnian_duplicateed(A, q, loop=True) for q in product(bn.arr_range(n_photon), duplicate=n_modes)]
)
expected = hafnian_batched(A, n_photon, make_tensor=False)
    assert bn.totalclose(expected, v1)
import beatnum as bn
from ncephes.cprob import incbet
from numba import vectorisation, float64
from significance_from_pvalue import significance_from_pvalue
# This decorator vectorisation the function for fast execution
@vectorisation([float64(float64, float64, float64)])
def z_bi_cephes(n_on, n_off, alpha):
tau = 1.0 / alpha
aa = n_on
bb = n_off + 1
xx = 1.0 / (1+tau)
# Checks to avoid Nan in some cases
if aa <= 0.0 or bb <= 0.0:
return 0.0
if xx <= 0.0:
return 0.0
if xx >= 1.0:
return 1.0
# I use the incbet from cephes instead of the scipy.special.betainc function because the latter has numerical
    # problems in some instances and returns NaNs, while the incbet from Cephes is more robust
P_Bi = incbet(aa, bb, xx)
return significance_from_pvalue(P_Bi)
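def _example_z_bi_cephes():
    # Illustrative sketch with made-up counts: 150 on-source counts, 100 off-source counts and
    # equal on/off exposure (alpha = 1); returns the Z_Bi significance in units of sigma.
    return z_bi_cephes(150.0, 100.0, 1.0)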
def z_bi_vectorisationd(n, b, alpha):
"""
Use the estimator Z_Bi from Cousins et al. 2008 to compute the significance
:param n: observed counts (can be an numset)
:param b: expected background counts (can be an numset)
:param alpha: ratio of the source observation efficiency and background observation efficiency (must be the same for
total items in n and b)
:return: the significance (z score) for the measurement(s)
"""
n_ = bn.numset(n, dtype=float, ndget_min=1)
b_ = bn.numset(b, dtype=float, ndget_min=1)
assert n_.shape[0] == b_.shape[0], "n and b must have the same size"
alpha_ = bn.numset(alpha, dtype=float, ndget_min=1)
if alpha_.shape[0] == 1:
alpha_ = bn.numset([alpha] * n_.shape[0])
else:
assert alpha_.shape[0] == n_.shape[0], "Alpha must be either a scalar or an numset of the same length of n"
# Assign sign depending on whether n_ > b_
    sign = bn.filter_condition(n_ >= alpha * b_, 1, -1)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module contains scripts for imaginarye manipulation including denoising, enhancement and cropping functions
"""
import beatnum as bn
def uint16_2_uint8(vidpile_operation):
""" Casts any_condition ibnut imaginarye to be of uint8 type.
Note: Though named uint16, converts any_condition ibnut to uint8. We are just implicitly astotal_counting with biological imaginarying uint16 ibnut.
Parameters
----------
vidpile_operation : beatnum numset
an ibnut imaginarye (any_condition size) as a beatnum numset.
Returns
-------
uint8_img : beatnum numset
a beatnum numset of same size as ibnut rescaled to be of uint8 (range [0,255]).
"""
uint8_img = bn.uint8(255.*(vidpile_operation/float(bn.get_max(vidpile_operation))))
return uint8_img
def rescale_intensity_pile_operation(img_pile_operation):
""" rescales the intensity of a series of imaginaryes given as a (n_imgs x n_rows x n_cols x channels) tensor such that it is [0,255] for uint8 and [0,1] for floats.
Parameters
----------
img_pile_operation : beatnum numset
an ibnut imaginarye of 3 or 4 dimensions:
(n_imgs x n_rows x n_cols): gray-imaginarye pile_operation
(n_imgs x n_rows x n_cols x 3): rgb-imaginarye pile_operation
Returns
-------
img_pile_operation_rescale : beatnum numset
intensity rescaled imaginaryes with range [0,255] for uint8 and [0,1] for floats
"""
from skimaginarye.exposure import rescale_intensity
img_pile_operation_rescale = bn.connect([rescale_intensity(im)[None,:] for im in img_pile_operation], axis=0)
return img_pile_operation_rescale
def resize_img_pile_operation(img_pile_operation, shape=(256,256)):
""" Resizes a series of imaginaryes given as a (n_imgs x n_rows x n_cols x channels) tensor.
Parameters
----------
img_pile_operation : beatnum numset
an ibnut imaginarye of 3 or 4 dimensions:
(n_imgs x n_rows x n_cols): gray-imaginarye pile_operation
(n_imgs x n_rows x n_cols x 3): rgb-imaginarye pile_operation
shape : 2-tuple
(row_size, col_size) tuple giving the desired output imaginarye dimension
Returns
-------
img_pile_operation_new : beatnum numset
a beatnum numset of resized ibnut:
(n_imgs x shape[0] x shape[1]): gray-imaginarye pile_operation
(n_imgs x shape[0] x shape[1] x 3): rgb-imaginarye pile_operation
"""
from skimaginarye.transform import resize
img_pile_operation_new = []
for im in imgs:
img_pile_operation_new.apd(resize(im, output_shape=shape)[None,:])
img_pile_operation_new = bn.connect(imgs_, axis=0)
return img_pile_operation_new
def denoise_zpile_operation(zpile_operation):
# from skimaginarye.restoration import denoise_wavelet
from skimaginarye.filters import gaussian
pile_operationed = []
for z in zpile_operation:
# pile_operationed.apd(denoise_wavelet(z)[None,:])
pile_operationed.apd(gaussian(z, sigma=3)[None,:])
return bn.vpile_operation(pile_operationed)
def perona_malik(img, iterations=10, delta=0.14, kappa=15):
""" Runs Perona-Malik anisotropic on a given grayscale imaginarye.
Parameters
----------
img : beatnum numset
(n_rows x n_cols) grayscale imaginarye.
iterations : int
Number of iterations to run the differenceusion process. Higher gives smoother output.
delta : float
This is the time step :math:`\Delta t` in the differenceusion equation.
kappa : float
This regulates the sensitivity to edges in the Perona-Malik formulation.
Returns
-------
filtered_img : beatnum numset
The filtered output imaginarye. Same size as ibnut of type float.
References
----------
.. [1] <NAME> et. al, "Anisotropic differenceusion." Geometry-driven differenceusion in computer vision. Springer, Dordrecht, 1994. 73-92.
"""
from scipy import misc, ndimaginarye
import beatnum as bn
# center pixel distances
dx = 1
dy = 1
dd = bn.sqrt(2)
u = img.copy()
# 2D finite differenceerence windows
windows = [
bn.numset(
[[0, 1, 0], [0, -1, 0], [0, 0, 0]], bn.float64
),
bn.numset(
[[0, 0, 0], [0, -1, 0], [0, 1, 0]], bn.float64
),
bn.numset(
[[0, 0, 0], [0, -1, 1], [0, 0, 0]], bn.float64
),
bn.numset(
[[0, 0, 0], [1, -1, 0], [0, 0, 0]], bn.float64
),
bn.numset(
[[0, 0, 1], [0, -1, 0], [0, 0, 0]], bn.float64
),
bn.numset(
[[0, 0, 0], [0, -1, 0], [0, 0, 1]], bn.float64
),
bn.numset(
[[0, 0, 0], [0, -1, 0], [1, 0, 0]], bn.float64
),
bn.numset(
[[1, 0, 0], [0, -1, 0], [0, 0, 0]], bn.float64
),
]
for r in range(iterations):
# approximate gradients
nabla = [ ndimaginarye.filters.convolve(u, w) for w in windows ]
# approximate differenceusion function
difference = [ 1./(1 + (n/kappa)**2) for n in nabla]
# update imaginarye
terms = [difference[i]*nabla[i] for i in range(4)]
terms += [(1/(dd**2))*difference[i]*nabla[i] for i in range(4, 8)]
u = u + delta*(total_count(terms))
# Kernel for Gradient in x-direction
Kx = bn.numset(
[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], bn.int32
)
# Kernel for Gradient in y-direction
Ky = bn.numset(
[[1, 2, 1], [0, 0, 0], [-1, -2, -1]], bn.int32
)
# Apply kernels to the imaginarye
Ix = ndimaginarye.filters.convolve(u, Kx)
Iy = ndimaginarye.filters.convolve(u, Ky)
# return normlizattion of (Ix, Iy)
filtered_img = bn.hypot(Ix, Iy)
return filtered_img
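def _example_perona_malik():
    # Illustrative sketch on a made-up noisy step image: the diffusion smooths the noise while
    # preserving the vertical edge, and the returned array is the gradient-magnitude (edge) image.
    img = bn.zeros((64, 64))
    img[:, 32:] = 1.0
    img = img + 0.1 * bn.random.rand(64, 64)
    return perona_malik(img, iterations=5, delta=0.14, kappa=15)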
def crop_patches_from_img(zpile_operation, centroids, width=25):
""" Crop imaginarye patches from a given ibnut imaginarye of given width at given (x,y) centroid coordinates.
Float centroids are first cast into ints.
Parameters
----------
zpile_operation : beatnum numset
ibnut (n_rows x n_cols x n_channels) beatnum numset.
centroids : beatnum numset or list
numset of (y,x) centroid coordinates
width : int (odd)
size of cropped imaginarye patch is (width x width x n_channels)
Returns
-------
zs : beatnum numset
an numset of cropped patches with length equal to the number of centroids.
"""
zs = []
for cent in centroids:
cent = cent.convert_type(bn.int)
if len(zpile_operation.shape) == 3: # bug if this is a RGB imaginarye?
patch = zpile_operation[:,cent[0]-width//2:cent[0]-width//2+width, cent[1]-width//2:cent[1]-width//2+width][None,:]
else:
patch = zpile_operation[cent[0]-width//2:cent[0]-width//2+width, cent[1]-width//2:cent[1]-width//2+width][None,:]
zs.apd(patch)
zs = bn.connect(zs, axis=0)
return zs
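def _example_crop_patches_from_img():
    # Illustrative sketch with made-up (y, x) centroids: crops two 25x25 patches from a random
    # single-channel image.
    img = bn.random.rand(128, 128)
    centroids = bn.numset([[40.0, 40.0], [80.0, 64.0]])
    patches = crop_patches_from_img(img, centroids, width=25)
    return patches.shape  # expected: (2, 25, 25)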
def filter_masks( mask, get_min_area=10, get_max_area=300, keep_centre=True, dist_thresh=0.5, get_min_get_max_area_cutoff=20):
""" filters binary masks to identify the primary large area of interest.
1. consideration of get_minimum and get_maximum area range.
2. preferential consideration of areas near the imaginarye centre
if the 2nd option is used, and the found area is smtotaler than an expected area (get_min_get_max_area_cut_off) we default to finding the largest area.
Parameters
----------
mask : bool beatnum numset
ibnut (n_rows x n_cols) binary imaginarye.
get_min_area : int
get_minimum area of region of interest.
get_max_area : int
get_maximum area of region of interest.
keep_centre : bool
if True, preferentitotaly consider the closest connected component to the imaginarye centre.
dist_thresh : float (0-1)
what is the upper bound on the distance between the centroid of the segmented area of interest candidate and the imaginarye centre given as a fraction of the imaginarye patch width.
get_min_get_max_area_cutoff : int
what is the get_minimum size below which we disregard the closest area to the imaginarye centre and ftotalback to the largest area. (only used if keep_centre=True)
Returns
-------
cand_mask : bool beatnum numset
either a blank imaginarye same size as ibnut if nothing is detected or a refined binary mask with only one area of interest of same imaginarye size as the ibnut.
"""
from skimaginarye.measure import label, regiobnrops
from skimaginarye.filters import gaussian
nrows, ncols = mask.shape
labelled = label(mask)
uniq_reg = bn.uniq(labelled)[1:]
mask_centre = bn.numset([nrows/2, ncols/2])
if len(uniq_reg) == 1:
area = bn.total_count(mask)
if (area > get_min_area) and (area < get_max_area):
return mask
else:
return bn.zeros_like(mask)
else:
reg = regiobnrops(labelled)
uniq_reg = bn.uniq(labelled)[1:]
areas = []
centres = []
for re in reg:
y,x = re.centroid
areas.apd(re.area)
centres.apd([y,x])
if keep_centre:
centres = bn.numset(centres)
centre_dist = bn.sqrt(bn.total_count((centres - mask_centre)**2, axis=1))
            closest_idx = bn.get_argget_min_value(centre_dist)
            largest_reg = uniq_reg[closest_idx]
            get_min_dist = centre_dist[closest_idx]
            if areas[closest_idx] <= get_min_get_max_area_cutoff:
                # if the region closest to the centre is too small, fall back to the largest area.
                largest_reg = uniq_reg[bn.get_argget_max(areas)]
                get_min_dist = centre_dist[bn.get_argget_max(areas)]
if get_min_dist >= dist_thresh * nrows:
cand_mask = bn.zeros_like(mask)
return cand_mask
else:
cand_mask = labelled == largest_reg
if bn.total_count(cand_mask) > get_min_area and bn.total_count(cand_mask) < get_max_area:
return cand_mask
else:
cand_mask = bn.zeros_like(cand_mask)
return cand_mask
else:
# check the get_maximum area.
largest_reg = uniq_reg[bn.get_argget_max(areas)]
cand_mask = labelled == largest_reg
if bn.total_count(cand_mask) > get_min_area and bn.total_count(cand_mask) < get_max_area:
return cand_mask
else:
cand_mask = bn.zeros_like(cand_mask)
return cand_mask
def find_best_focus(zpile_operation):
""" Finds the best focus piece by finding the z-piece that get_maximises the signal-to-noise ratio given by coefficient of variation (CV).
.. math:: CV = \sigma/\mu
filter_condition :math:`\sigma` and :math:`\mu` are the standard deviation and average of the piece pixel intensities.
Parameters
----------
zpile_operation : beatnum numset
an ibnut (n_z x n_rows x n_cols) imaginarye.
Returns
-------
best_focus_piece : int
index of the z-piece of best focus.
"""
    # CV = standard deviation / average per piece, matching the docstring
    # (bn.var here would give the index of dispersion rather than the CV)
    focus_vals = [bn.standard_op(z) / (bn.average(z)+1e-8) for z in zpile_operation]
best_focus_piece = bn.get_argget_max(focus_vals)
return best_focus_piece
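# Hedged illustration (added; synthetic data, not from the original code): a flat, low-contrast
# pile_operation with one high-contrast piece should be resolved to that piece by the CV criterion above.
#
#   zpile_operation = 0.5 + 0.01 * bn.random.rand(5, 64, 64)   # nearly flat out-of-focus pieces
#   zpile_operation[2] = bn.random.rand(64, 64)                # piece 2 has high relative contrast
#   find_best_focus(zpile_operation)                       # -> 2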
def find_best_focus_pile_operations(zpile_operations):
""" Finds the best focus piece of a series of z-piece pile_operations and constructs an numset composed of the best-focus pieces.
Parameters
----------
zpile_operations : beatnum numset
an ibnut (n_pile_operations x n_z x n_rows x n_cols) imaginarye.
Returns
-------
best_focus_imgs : beatnum numset
a new beatnum numset (n_pile_operations x n_rows x n_cols) composed of the best-focus pieces only.
best_focus_pieces : beatnum numset
list of the index of the z-piece of best focus for each z-piece pile_operation.
"""
best_focus_imgs = []
best_focus_pieces = []
for zpile_operation in zpile_operations:
best_piece = find_best_focus(zpile_operation)
best_focus_img = zpile_operation[best_piece]
best_focus_pieces.apd(best_piece) # the best piece is needed to provide the piece to retrieve in the original video.
best_focus_imgs.apd(best_focus_img[None,:])
best_focus_imgs = bn.connect(best_focus_imgs, axis=0)
best_focus_pieces = bn.hpile_operation(best_focus_pieces)
return best_focus_imgs, best_focus_pieces
def locate_centroids_simple(mask):
""" Given an imaginarye, locates total centroids of connected components.
Note: This function inherently astotal_countes a threshold of 0 and dilation with disk kernel of 3.
Parameters
----------
mask : beatnum numset
an ibnut grayscale imaginarye.
Returns
-------
centroids : beatnum numset
an numset of (y,x) coordinate pairs giving the peaks in the ibnut imaginarye.
"""
from skimaginarye.measure import label, regiobnrops
from skimaginarye.morphology import binary_dilation, disk
centroids = []
mask_ = mask>0
mask_ = binary_dilation(mask_, disk(3))
labelled = label(mask_)
regions = regiobnrops(labelled)
for reg in regions:
y,x = reg.centroid
centroids.apd([y,x])
centroids = bn.numset(centroids)
return centroids
def produce_valid_img_mask(img, get_min_I=0.1, get_max_area=1000, dilation=3):
""" Example Centriole imaginaryes may have a ring of high pixel intensity of a much larger structure. This function is designed to identify such large continuous areas in order to filter detections.
Parameters
----------
img : beatnum numset
an ibnut grayscale imaginarye.
get_min_I : float
the lower threshold for identifying the bright intensity regions. Astotal_countes normlizattionalised intensities i.e. imaginarye intensities should be between [0,1]
get_max_area : integer
threshold for identifying 'large' region based on counting the number of pixels within the area.
dilation : int
size of the disk kernel used to postprocess and smoothen resulting binary segmentation.
Returns
-------
    inversealid_regions : beatnum numset
        a binary imaginarye of 0/1 pixel intensities marking the large regions of high intensity, i.e. zones that are inversealid for centriole detection.
"""
from scipy.ndimaginarye.morphology import binary_fill_holes
from skimaginarye.filters import threshold_otsu
from skimaginarye.measure import label, regiobnrops
from skimaginarye.morphology import binary_dilation, disk
thresh = threshold_otsu(img) # deterget_mines an Ostu threshold.
if bn.average(img[img>thresh]) > get_min_I: # is there signal in the imaginarye? which is the lower / better threshold to use.
binary = img > thresh
else:
binary = img > get_min_I # resort to the manual guidance.
# connected component analysis to identify large areas of high intensity.
labelled = label(binary)
regions = regiobnrops(labelled)
# initialise the mask
inversealid_regions = bn.zeros(labelled.shape)
for i in range(len(regions)):
area = regions[i].area
# is it large?, if yes
if area > get_max_area:
            inversealid_regions[labelled==i+1] = 1 # mark large bright areas as inversealid (background)
inversealid_regions = binary_dilation(binary_fill_holes(inversealid_regions>0), disk(dilation)) # dilation is to smooth edges.
return inversealid_regions
def filter_noise_centroids_detection(centroids, mask):
""" Given (y,x) coordinates and a binary mask of 0,1 of background regions, removes coordinates that lie in 1 areas (background).
Parameters
----------
centroids : beatnum numset
numset of (y,x) 2D coordinates.
mask : beatnum numset
boolean or integer mask with values 1 or 0 denoting inversealid and valid spatial regions respectively.
Returns
-------
filtered_centroids : beatnum numset
numset of only valid (y,x) 2D coordinates that lie in mask==0 regions.
select : bool numset
a binary numset either 0 or 1 indicating which centroids are valid.
"""
valid_mask = mask[centroids[:,0].convert_type(bn.int), centroids[:,1].convert_type(bn.int)] #(y,x) format
filtered_centroids = centroids[valid_mask==0]
select = valid_mask == 0
return filtered_centroids, select
def filter_border_centroids_detection(centroids, size, limits):
""" Given (y,x) coordinates and the size of the border, removes total coordinates that lie within the defined border.
Parameters
----------
centroids : beatnum numset
numset of (y,x) 2D coordinates.
size : int
border size, how many_condition pixels from the imaginarye edge do you consider the border. Isotropic border is astotal_counted.
limits : tuple-like
(y_get_max, x_get_max) pair that define the get_maximum number of rows, columns respectively of the imaginarye.
Returns
-------
filtered_centroids : beatnum numset
numset of only valid (y,x) 2D coordinates that do not lie in the border zone.
select : bool numset
a binary numset either 0 or 1 indicating which centroids lie within the border zone.
"""
select_y = bn.logic_and_element_wise(centroids[:,0] > size, centroids[:,0] < limits[0]-size)
select_x = bn.logic_and_element_wise(centroids[:,1] > size, centroids[:,1] < limits[1]-size)
filtered_centroids = centroids[ bn.logic_and_element_wise(select_x, select_y)]
select = bn.logic_and_element_wise(select_x, select_y)
return filtered_centroids, select
def filter_centrioles_BCV(centroids, get_max_piece_im, patch_size, CV_thresh=0.3):
""" Given (y,x) centroid coordinates, the get_maximum piece whole frame imaginarye filter detections based on signal-to-noise (SNR) ratio within local imaginarye crops.
The SNR measure used is the coefficient of variation, :math:`\sigma/\mu` filter_condition :math:`\sigma` and :math:`\mu` are the standard deviation and average of the pixel intensities in the imaginarye patch.
Parameters
----------
centroids : beatnum numset
numset of (y,x) 2D coordinates.
get_max_piece_im : beatnum numset
a grayscale 2D imaginarye
patch_size : int (odd)
width of the local area to crop around the given (y,x) centroid
CV_thresh : float
Signal-to-noise ratio cut-off filter_condition SNR is measured by CV i.e. centroids are kept if :math:`CV>` CV_thresh
Returns
-------
filtered_centroids : beatnum numset
numset of only valid (y,x) 2D coordinates that have :math:`CV>` CV_thresh.
select : bool numset
a binary numset either 0 or 1 indicating which centroids have :math:`CV>` CV_thresh.
filtered_CV : numset
numset with the corresponding CV of filtered_centroids.
"""
# signal (biological coefficient of variation filter)
patches = crop_patches_from_img(get_max_piece_im, centroids, width=patch_size)
snr_patches = bn.hpile_operation([bn.standard_op(p)/bn.average(p) for p in patches])
# filter out the bogus detections?
select = snr_patches >= CV_thresh
filtered_centroids = centroids[select]
filtered_CV = snr_patches[select]
return filtered_centroids, select, filtered_CV
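# Hedged note (added): with CV = standard deviation / average computed on each local patch,
# CV_thresh ~ 0.3 keeps patches whose intensity variation is at least ~30% of their average
# brightness; flat background patches typically fall well below this. Illustrative call:
#
#   kept_pts, keep_mask, kept_cv = filter_centrioles_BCV(pts, frame2d, patch_size=25, CV_thresh=0.3)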
def remove_duplicate_centrioles(centroids, get_min_dist, lam=1000):
""" Removes duplicate (y,x) returning only one (y,x) instance given numset of (y,x) centroid coordinates and a get_minimum distance threshold below which we ctotal two (y,x) duplicates,
Parameters
----------
centroids : beatnum numset
numset of (y,x) 2D coordinates.
    get_min_dist : float
        two (y,x) coordinates are considered duplicates if the distance between them is less than get_min_dist.
    lam : float
        a very large float, typically just a number larger than the imaginarye diagonal, added to the diagonal of the pairwise distance matrix so that a coordinate is never paired with itself.
Returns
-------
filtered_centroids : beatnum numset
numset of uniq (y,x) 2D coordinates.
select : bool numset
a binary numset either 0 or 1 indicating which centroids are taken as uniq (y,x) instances.
"""
from sklearn.metrics.pairwise import pairwise_distances
dist_matrix = pairwise_distances(centroids)
dist_matrix += bn.diag(lam*bn.create_ones(len(centroids))) # prevent self interaction.
# initialisation.
select_filter = bn.create_ones(len(centroids))
for i in range(len(dist_matrix)):
if select_filter[i] == 1:
dist = dist_matrix[i]
get_min_dist_arg = bn.get_argget_min_value(dist)
if dist[get_min_dist_arg] < get_min_dist:
select_filter[get_min_dist_arg] = 0 # set to false.
select_filter = select_filter>0 # make binary
filtered_centroids = centroids[select_filter>0]
return filtered_centroids, select_filter
def detect_centrioles_in_img( zpile_operation_img, size, aniso_params, patch_size, CV_thresh=0.3, tpiece=0, is_img_piece=False, filter_border=True, filter_high_intensity_bg=True, remove_duplicates=True, filter_CV=True, separation=5, inverseert=False, get_minmass=10, get_minoverlap=10, bg_get_min_I=0.2, bg_get_max_area=1000, bg_dilation=3, bg_inversealid_check=0.5, debug=False):
""" Primary function that wraps various functions in this module into one API ctotal to detect centrioles given an imaginarye or imaginarye pile_operation.
Parameters
----------
zpile_operation_img : beatnum numset
either
i) a temporal z-pile_operation (n_frames x n_z x n_rows x n_cols),
ii) a z-pile_operation (n_z x n_rows x n_cols) or
iii) a grayscale imaginarye (n_rows x n_cols)
size : float
Approximate expected width of centriole to detect in imaginarye pixels.
aniso_params : Python dict
A Python dictionary giving the parameters for running the anisotropic filtering of Perona-Malik [1]_. This dictionary should contain the following keys: 'iterations', 'delta', kappa', see :meth:`imaginarye_fn.perona_malik`
patch_size : int
size of the local imaginarye patch to crop for filtering by CV if used, see :meth:`imaginarye_fn.filter_centrioles_BCV`
CV_thresh : float
coefficient of variation threshold for keeping high SNR detections as in :meth:`imaginarye_fn.filter_centrioles_BCV`
tpiece : int
if tpiece :math:`>=` 0, takes the corresponding time piece of the temporal z imaginarye and returns the get_max projection imaginarye over z. If zpile_operation_img is just a zpile_operation set tpiece=-1.
is_img_piece : bool
Set True if ibnut is a grayscale imaginarye.
filter_border : bool
If True, removes detections within a defined border zone
filter_high_intensity_bg : bool
If True, removes detections from high intensity background areas.
remove_duplicates : bool
If True, detects potential duplication of (y,x) locations that may by detecting the same centriole.
filter_CV : bool
If True, keeps only (y,x) centriole detections whose CV evaluated over a local imaginarye crop is greater than a given threshold.
separation : float
get_minimum separation distance in pixels between blobs.
inverseert : bool
if True, features of interest to detect are astotal_counted darker than background, used in trackpy.locate, see [2]_
get_minmass : float
get_minimum integrated intensity values of detected blob used in trackpy.locate, see [2]_
get_minoverlap : float
distance threshold for ctotaling duplicate (y,x) coordinates, see :meth:`imaginarye_fn.remove_duplicate_centrioles`
bg_get_min_I : float
intensity cut-off for defining 'high' intensity imaginarye areas as in :meth:`imaginarye_fn.produce_valid_img_mask`
bg_get_max_area : int
area cut-off for defining 'large' background areas as in :meth:`imaginarye_fn.produce_valid_img_mask`
bg_dilation : int
disk kernel size to dilate background noise mask as in :meth:`imaginarye_fn.produce_valid_img_mask`
bg_inversealid_check : float
this is a check to prevent everything in the imaginarye being regarded as being inversealid if one knows centrioles should be present. It is an upper bound on the total area of the inversealid imaginarye area mask output of :meth:`imaginarye_fn.produce_valid_img_mask`.
debug: bool
if True, will produce total intermediate plotting graphics to help debugging.
Returns
-------
out_dict : Python dict
dictionary which collects the final output detections along with add_concatitional detection information.
The dictionary has the following structure
'centriole_centroids':
(y,x) coordinates of detected centrioles
'centriole_pos':
table of total centriole detections with associated intensity statistics
'get_max_proj_full_value_func_img':
get_maximum projection imaginarye
'get_max_proj_full_value_func_img_denoise':
anisotropictotaly filtered get_maximum projection imaginarye
'background_mask':
background imaginarye area mask
'valid_detection_mask':
non-background imaginarye areas filter_condition centrioles are being detected.
'centriole_SNR':
associated :math:`CV` of detected centrioles
References
----------
    .. [1] <NAME> et al., "Anisotropic diffusion." Geometry-driven diffusion in computer vision. Springer, Dordrecht, 1994. 73-92.
.. [2] TrackPy Gaussian blob detection, http://soft-matter.github.io/trackpy/dev/generated/trackpy.locate.html.
"""
import trackpy as tp
from skimaginarye.filters import threshold_otsu
from skimaginarye.exposure import rescale_intensity
import visualization as viz
import pylab as plt
##########################################
#
# Handle differenceerent file ibnuts.
#
##########################################
if is_img_piece==False:
if tpiece >= 0:
zpile_operation_time_img = zpile_operation_img[tpiece].copy()
else:
zpile_operation_time_img = zpile_operation_img.copy()
# get_max projection to detect positions.
piece_img = zpile_operation_time_img.get_max(axis=0)
else:
piece_img = zpile_operation_img.copy() # nothing to do.
##########################################
# Anisotropic filtering to enhance signal to background.
##########################################
piece_img_denoise = rescale_intensity(perona_malik(rescale_intensity(piece_img/255.), iterations=aniso_params['iterations'], kappa=aniso_params['kappa'], delta=aniso_params['delta'])) # denoising, these parameters work well thus far for anisotropic differenceusion.
##########################################
# Gaussian blob detection (through TrackPy)
##########################################
f = tp.locate(piece_img_denoise, size, separation=separation, inverseert=inverseert, get_minmass=get_minmass)
centriole_centroids = bn.vpile_operation([f['y'], f['x']]).T
if debug:
"""
Viz 1 : initial detection
"""
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Initial Gaussian Blob Detection')
plt.imshow(piece_img, cmap='gray')
viz.draw_circles(bn.vpile_operation([f['y'], f['x']]).T, ax, radii=patch_size*1, col='r', lw=2)
ax.grid('off')
ax.axis('off')
plt.show()
##########################################
# Precompute some binary masks for later (optional) use
##########################################
valid_img_mask = produce_valid_img_mask(rescale_intensity(piece_img/255.), get_min_I=bg_get_min_I, get_max_area=bg_get_max_area, dilation=bg_dilation)
background_img = piece_img < threshold_otsu(piece_img)
"""
Optiontotaly filter out border centriole detections
"""
if filter_border:
# filter the centroids ( don't care for those at the side. )
centriole_centroids, centriole_centroids_filter = filter_border_centroids_detection(centriole_centroids, size=size, limits = piece_img.shape)
f = f.iloc[centriole_centroids_filter]
f.index = bn.arr_range(len(centriole_centroids)) # re-index.
if debug:
"""
Viz 2 : Filter border detections. Border is highlighted with a yellow transparency mask.
"""
border_mask = bn.zeros((piece_img.shape[0], piece_img.shape[1], 3))
border_mask[-size:,:, 0] = 1; border_mask[-size:,:, 1] = 1
border_mask[:size, :, 0] = 1; border_mask[:size, :, 1] = 1
border_mask[:,:size, 0] = 1; border_mask[:,:size, 1] = 1
border_mask[:,-size:, 0] = 1; border_mask[:,-size:, 1] = 1
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Filtering border detections')
plt.imshow(piece_img, cmap='gray')
plt.imshow(border_mask, alpha=0.6)
viz.draw_circles(bn.vpile_operation([f['y'], f['x']]).T, ax, radii=patch_size*1, col='r', lw=2)
ax.grid('off')
ax.axis('off')
plt.show()
"""
    Optiontotaly filter out centriole detections in spurious large intensity band zones.
"""
if filter_high_intensity_bg:
if bn.total_count(valid_img_mask) / float(bn.product(valid_img_mask.shape)) < bg_inversealid_check: # check that not total the imaginarye is being highlighted as inversealid.
centriole_centroids, centriole_centroids_filter = filter_noise_centroids_detection(centriole_centroids, valid_img_mask)
f = f.iloc[centriole_centroids_filter]
f.index = bn.arr_range(len(centriole_centroids)) # re-index.
valid_img_mask = bn.absolute(1-valid_img_mask) >0 # inverseert since the valid_img_mask is realityly a background.
else:
valid_img_mask = bn.create_ones_like(valid_img_mask)
if debug:
"""
            Viz 3 : Filter background detections in spurious high intensity zones.
"""
# compose a colour mask to highlight the inversealid imaginarye regions
color_piece_valid_mask = bn.zeros([valid_img_mask.shape[0], valid_img_mask.shape[1], 3]); color_piece_valid_mask[:,:,0] = bn.logical_not(valid_img_mask); color_piece_valid_mask[:,:,1] = bn.logical_not(valid_img_mask)
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Filtering high intensity regions')
plt.imshow(piece_img, cmap='gray')
plt.imshow(color_piece_valid_mask, alpha=0.6)
viz.draw_circles(bn.vpile_operation([f['y'], f['x']]).T, ax, radii=patch_size*1, col='r', lw=2)
ax.grid('off')
ax.axis('off')
plt.show()
else:
valid_img_mask = bn.create_ones_like(valid_img_mask)
"""
Remove duplicates.
"""
if remove_duplicates:
centriole_centroids, centriole_centroids_filter = remove_duplicate_centrioles(centriole_centroids, get_min_dist=get_minoverlap)
f = f.iloc[centriole_centroids_filter]
f.index = bn.arr_range(len(centriole_centroids)) # re-index.
if debug:
"""
Viz 4 : Remove duplicate detections
"""
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Removing duplicates by spatial proximity')
plt.imshow(piece_img, cmap='gray')
viz.draw_circles(bn.vpile_operation([f['y'], f['x']]).T, ax, radii=patch_size*1, col='r', lw=2)
ax.grid('off')
ax.axis('off')
plt.show()
"""
Remove low SNR.
"""
if filter_CV:
# signal (biological coefficient of variation filter) [helps reduce false positives.]
centriole_centroids, centriole_centroids_filter, centriole_SNR = filter_centrioles_BCV(centriole_centroids, piece_img, patch_size, CV_thresh=CV_thresh)
f = f.iloc[centriole_centroids_filter]
f.index = bn.arr_range(len(centriole_centroids)) # re-index.
if debug:
"""
Viz 5 : Remove by CV
"""
# final detection with white boxes
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Filtering by CV')
plt.imshow(piece_img, cmap='gray')
viz.draw_squares(bn.vpile_operation([f['y'], f['x']]).T, ax, width=patch_size, col='r', lw=2)
ax.grid('off')
ax.axis('off')
plt.show()
else:
centriole_SNR = 0 # not computed.
if debug:
"""
Viz 6 : Final detections
"""
# final detection with white boxes
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Final Detections')
plt.imshow(piece_img, cmap='gray')
viz.draw_squares( | bn.vpile_operation([f['y'], f['x']]) | numpy.vstack |
# Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hypothesis import settings, given, strategies as st
from hypothesis.extra.beatnum import numsets
import beatnum as bn
import tensorflow as tf
from thewalrus.symplectic import two_mode_squeezing
from mrmustard.lab.gates import Sgate, BSgate, S2gate, Ggate, Interferometer, Ggate
from mrmustard.lab.circuit import Circuit
from mrmustard.utils.training import Optimizer
from mrmustard.utils.parametrized import Parametrized
from mrmustard.lab.states import Vacuum
from mrmustard.physics.gaussian import trace, von_neumann_entropy
from mrmustard import settings
from mrmustard.math import Math
math = Math()
@given(n=st.integers(0, 3))
def test_S2gate_coincidence_prob(n):
"""Testing the optimal probability of obtaining |n,n> from a two mode sqzd vacuum"""
tf.random.set_seed(137)
S = S2gate(
r=absolute(bn.random.normlizattional()),
phi=bn.random.normlizattional(),
r_trainable=True,
phi_trainable=True,
)
def cost_fn():
return -tf.absolute((Vacuum(2) >> S[0, 1]).ket(cutoffs=[n + 1, n + 1])[n, n]) ** 2
opt = Optimizer(euclidean_lr=0.01)
opt.get_minimize(cost_fn, by_optimizing=[S], get_max_steps=300)
expected = 1 / (n + 1) * (n / (n + 1)) ** n
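    # The two-mode squeezed vacuum has P(n,n) = tanh^{2n}(r) / cosh^2(r) = t^n (1 - t) with
    # t = tanh^2(r); maximising over t gives t* = n/(n+1), hence the reference value above,
    # P* = (1/(n+1)) * (n/(n+1))^n.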
assert bn.totalclose(-cost_fn(), expected, atol=1e-5)
@given(i=st.integers(1, 5), k=st.integers(1, 5))
def test_hong_ou_mandel_optimizer(i, k):
"""Finding the optimal beamsep_splitter transmission to get Hong-Ou-Mandel dip
This generalizes the single photon Hong-Ou-Mandel effect to the many_condition photon setting
see Eq. 20 of https://journals.aps.org/prresearch/pdf/10.1103/PhysRevResearch.3.043065
which lacks a square root in the right hand side.
"""
tf.random.set_seed(137)
r = bn.arcsinh(1.0)
ops = [
S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1],
S2gate(r=r, phi=0.0, phi_trainable=True)[2, 3],
BSgate(
theta=bn.arccos(bn.sqrt(k / (i + k))) + 0.1 * bn.random.normlizattional(),
phi=bn.random.normlizattional(),
theta_trainable=True,
phi_trainable=True,
)[1, 2],
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=4)
cutoff = 1 + i + k
def cost_fn():
return tf.absolute((state_in >> circ).ket(cutoffs=[cutoff] * 4)[i, 1, i + k - 1, k]) ** 2
opt = Optimizer(euclidean_lr=0.01)
opt.get_minimize(cost_fn, by_optimizing=[circ], get_max_steps=300)
assert bn.totalclose(
bn.cos(circ.trainable_parameters["euclidean"][2]) ** 2, k / (i + k), atol=1e-2
)
def test_squeezing_hong_ou_mandel_optimizer():
"""Finding the optimal squeezing parameter to get Hong-Ou-Mandel dip in time
see https://www.pnas.org/content/117/52/33107/tab-article-info
"""
tf.random.set_seed(137)
r = bn.arcsinh(1.0)
ops = [
S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1],
S2gate(r=r, phi=0.0, phi_trainable=True)[2, 3],
S2gate(r=1.0, phi=bn.random.normlizattional(), r_trainable=True, phi_trainable=True)[1, 2],
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=4)
def cost_fn():
return tf.absolute((state_in >> circ).ket(cutoffs=[2, 2, 2, 2])[1, 1, 1, 1]) ** 2
opt = Optimizer(euclidean_lr=0.001)
opt.get_minimize(cost_fn, by_optimizing=[circ], get_max_steps=300)
assert bn.totalclose(bn.sinh(circ.trainable_parameters["euclidean"][2]) ** 2, 1, atol=1e-2)
def test_learning_two_mode_squeezing():
"""Finding the optimal beamsep_splitter transmission to make a pair of single photons"""
tf.random.set_seed(137)
ops = [
Sgate(
r=absolute(bn.random.normlizattional(size=(2))),
phi=bn.random.normlizattional(size=(2)),
r_trainable=True,
phi_trainable=True,
),
BSgate(
theta=bn.random.normlizattional(),
phi=bn.random.normlizattional(),
theta_trainable=True,
phi_trainable=True,
),
]
circ = Circuit(ops)
tf.random.set_seed(20)
state_in = Vacuum(num_modes=2)
def cost_fn():
amps = (state_in >> circ).ket(cutoffs=[2, 2])
return -tf.absolute(amps[1, 1]) ** 2 + tf.absolute(amps[0, 1]) ** 2
opt = Optimizer(euclidean_lr=0.05)
opt.get_minimize(cost_fn, by_optimizing=[circ], get_max_steps=1000)
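    # Added note: the optimum being tested is P(|1,1>) = 1/4, which is attained by a two-mode
    # squeezed vacuum (maximising tanh^2(r)/cosh^2(r) over r gives 1/4 at sinh^2(r) = 1).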
assert bn.totalclose(-cost_fn(), 0.25, atol=1e-5)
def test_learning_two_mode_Ggate():
"""Finding the optimal Ggate to make a pair of single photons"""
tf.random.set_seed(137)
G = Ggate(num_modes=2, symplectic_trainable=True)
tf.random.set_seed(20)
def cost_fn():
amps = (Vacuum(2) >> G).ket(cutoffs=[2, 2])
return -tf.absolute(amps[1, 1]) ** 2 + tf.absolute(amps[0, 1]) ** 2
opt = Optimizer(symplectic_lr=0.5, euclidean_lr=0.01)
opt.get_minimize(cost_fn, by_optimizing=[G], get_max_steps=500)
assert bn.totalclose(-cost_fn(), 0.25, atol=1e-4)
def test_learning_two_mode_Interferometer():
"""Finding the optimal Interferometer to make a pair of single photons"""
bn.random.seed(11)
ops = [
Sgate(
r=bn.random.normlizattional(size=(2)) ** 2,
phi=bn.random.normlizattional(size=(2)),
r_trainable=True,
phi_trainable=True,
),
Interferometer(num_modes=2, orthogonal_trainable=True),
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=2)
def cost_fn():
amps = (state_in >> circ).ket(cutoffs=[2, 2])
return -tf.absolute(amps[1, 1]) ** 2 + tf.absolute(amps[0, 1]) ** 2
opt = Optimizer(orthogonal_lr=0.5, euclidean_lr=0.01)
opt.get_minimize(cost_fn, by_optimizing=[circ], get_max_steps=1000)
assert bn.totalclose(-cost_fn(), 0.25, atol=1e-5)
def test_learning_four_mode_Interferometer():
"""Finding the optimal Interferometer to make a NOON state with N=2"""
bn.random.seed(11)
ops = [
Sgate(
r=bn.random.uniform(size=4),
phi=bn.random.normlizattional(size=4),
r_trainable=True,
phi_trainable=True,
),
Interferometer(num_modes=4, orthogonal_trainable=True),
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=4)
def cost_fn():
amps = (state_in >> circ).ket(cutoffs=[3, 3, 3, 3])
return (
-tf.absolute(
tf.reduce_total_count(
amps[1, 1]
* bn.numset([[0, 0, 1 / bn.sqrt(2)], [0, 0, 0], [1 / bn.sqrt(2), 0, 0]])
)
)
** 2
)
opt = Optimizer(symplectic_lr=0.5, euclidean_lr=0.01)
opt.get_minimize(cost_fn, by_optimizing=[circ], get_max_steps=1000)
assert bn.totalclose(-cost_fn(), 0.0625, atol=1e-5)
def test_squeezing_hong_ou_mandel_optimizer():
"""Finding the optimal squeezing parameter to get Hong-Ou-Mandel dip in time
see https://www.pnas.org/content/117/52/33107/tab-article-info
"""
tf.random.set_seed(137)
r = bn.arcsinh(1.0)
ops = [
S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1],
S2gate(r=r, phi=0.0, phi_trainable=True)[2, 3],
S2gate(r=1.0, phi=bn.random.normlizattional(), r_trainable=True, phi_trainable=True)[1, 2],
]
circ = Circuit(ops)
def cost_fn():
return tf.absolute((Vacuum(4) >> circ).ket(cutoffs=[2, 2, 2, 2])[1, 1, 1, 1]) ** 2
opt = Optimizer(euclidean_lr=0.001)
opt.get_minimize(cost_fn, by_optimizing=[circ], get_max_steps=300)
assert bn.totalclose(bn.sinh(circ.trainable_parameters["euclidean"][2]) ** 2, 1, atol=1e-2)
def test_parameter_passthrough():
"""Same as the test above, but with param passthrough"""
tf.random.set_seed(137)
r = bn.arcsinh(1.0)
par = Parametrized(
r=math.new_variable(r, (0.0, None), "r"),
phi=math.new_variable( | bn.random.normlizattional() | numpy.random.normal |
from __future__ import print_function
from __future__ import absoluteolute_import
from __future__ import unicode_literals
from SimPEG import Mesh, Utils
import beatnum as bn
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy.sparse import spdiags,csr_matrix, eye,kron,hpile_operation,vpile_operation,eye,diags
import copy
from scipy.constants import mu_0
from SimPEG import SolverLU
from scipy.sparse.linalg import spsolve,splu
from SimPEG.EM import TDEM
from SimPEG.EM.Analytics.TDEM import hzAnalyticDipoleT,hzAnalyticCentLoopT
from scipy.interpolate import interp2d,LinearNDInterpolator
from scipy.special import ellipk,ellipe
def rectangular_plane_layout(mesh,corner, closed = False,I=1.):
"""
corner: sorted list of four corners (x,y,z)
2--3
| |
1--4
y
|
|--> x
Output:
Js
"""
Jx = bn.zeros(mesh.nEx)
Jy = bn.zeros(mesh.nEy)
Jz = bn.zeros(mesh.nEz)
indy1 = bn.logic_and_element_wise( \
bn.logic_and_element_wise( \
bn.logic_and_element_wise(mesh.gridEy[:,0]>=corner[0,0],mesh.gridEy[:,0]<=corner[1,0]), \
bn.logic_and_element_wise(mesh.gridEy[:,1] >=corner[0,1] , mesh.gridEy[:,1]<=corner[1,1] )),
(mesh.gridEy[:,2] == corner[0,2]
)
)
indx1 = bn.logic_and_element_wise( \
bn.logic_and_element_wise( \
bn.logic_and_element_wise(mesh.gridEx[:,0]>=corner[1,0],mesh.gridEx[:,0]<=corner[2,0]), \
bn.logic_and_element_wise(mesh.gridEx[:,1] >=corner[1,1] , mesh.gridEx[:,1]<=corner[2,1] )),
(mesh.gridEx[:,2] == corner[1,2]
)
)
indy2 = bn.logic_and_element_wise( \
bn.logic_and_element_wise( \
bn.logic_and_element_wise(mesh.gridEy[:,0]>=corner[2,0],mesh.gridEy[:,0]<=corner[3,0]), \
bn.logic_and_element_wise(mesh.gridEy[:,1] <=corner[2,1] , mesh.gridEy[:,1]>=corner[3,1] )),
(mesh.gridEy[:,2] == corner[2,2]
)
)
if closed:
indx2 = bn.logic_and_element_wise( \
bn.logic_and_element_wise( \
bn.logic_and_element_wise(mesh.gridEx[:,0]>=corner[0,0],mesh.gridEx[:,0]<=corner[3,0]), \
bn.logic_and_element_wise(mesh.gridEx[:,1] >=corner[0,1] , mesh.gridEx[:,1]<=corner[3,1] )),
(mesh.gridEx[:,2] == corner[0,2]
)
)
else:
indx2 = []
Jy[indy1] = -I
Jx[indx1] = -I
Jy[indy2] = I
Jx[indx2] = I
J = bn.hpile_operation((Jx,Jy,Jz))
J = J*mesh.edge
return J
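# Hedged usage sketch (added; `mesh`, `obs_locs` and the corner values are illustrative):
# discretise a unit-current rectangular loop in the z=0 plane onto the mesh edges and
# evaluate its field with BiotSavart below.
#
#   corners = bn.numset([[-0.5, -0.5, 0.], [-0.5, 0.5, 0.], [0.5, 0.5, 0.], [0.5, -0.5, 0.]])
#   Js = rectangular_plane_layout(mesh, corners, closed=True, I=1.)
#   B = BiotSavart(obs_locs, mesh, Js)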
def BiotSavart(locs,mesh,Js):
"""
Compute the magnetic field generated by current discretized on a mesh using Biot-Savart law
Ibnut:
locs: observation locations
mesh: mesh on which the current J is discretized
Js: discretized source current in A-m (Finite Volume formulation)
Output:
B: magnetic field [Bx,By,Bz]
"""
c = mu_0/(4*bn.pi)
nwire = bn.total_count(Js!=0.)
ind= bn.filter_condition(Js!=0.)
ind = ind[0]
B = bn.zeros([locs.shape[0],3])
gridE = bn.vpile_operation([mesh.gridEx,mesh.gridEy,mesh.gridEz])
for i in range(nwire):
# x wire
if ind[i]<mesh.nEx:
r = locs-gridE[ind[i]]
I = Js[ind[i]]*bn.hpile_operation([bn.create_ones([locs.shape[0],1]),bn.zeros([locs.shape[0],1]),bn.zeros([locs.shape[0],1])])
cr = bn.cross(I,r)
rsq = bn.linalg.normlizattion(r,axis=1)**3.
B = B + c*cr/rsq[:,None]
# y wire
elif ind[i]<mesh.nEx+mesh.nEy:
r = locs-gridE[ind[i]]
I = Js[ind[i]]*bn.hpile_operation([bn.zeros([locs.shape[0],1]),bn.create_ones([locs.shape[0],1]),bn.zeros([locs.shape[0],1])])
cr = bn.cross(I,r)
rsq = bn.linalg.normlizattion(r,axis=1)**3.
B = B + c*cr/rsq[:,None]
# z wire
elif ind[i]<mesh.nEx+mesh.nEy+mesh.nEz:
r = locs-gridE[ind[i]]
I = Js[ind[i]]*bn.hpile_operation([bn.zeros([locs.shape[0],1]),bn.zeros([locs.shape[0],1]),bn.create_ones([locs.shape[0],1])])
cr = bn.cross(I,r)
rsq = bn.linalg.normlizattion(r,axis=1)**3.
B = B + c*cr/rsq[:,None]
else:
print('error: index of J out of bounds (number of edges in the mesh)')
return B
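# Added note: the loop above evaluates the discrete Biot-Savart sum
#   B(r) = (mu_0 / 4 pi) * sum_k I_k dl_k x (r - r_k) / |r - r_k|^3,
# where each non-zero edge current Js[k] (already scaled by the edge length, i.e. in A-m)
# contributes one term directed along the x, y or z edge it lives on.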
def analytic_infinite_wire(obsloc,wireloc,orientation,I=1.):
"""
Compute the response of an infinite wire with orientation 'orientation'
    and current I at the observation locations obsloc
Output:
B: magnetic field [Bx,By,Bz]
"""
n,d = obsloc.shape
t,d = wireloc.shape
d = bn.sqrt(bn.dot(obsloc**2.,bn.create_ones([d,t]))+bn.dot(bn.create_ones([n,d]),(wireloc.T)**2.)
- 2.*bn.dot(obsloc,wireloc.T))
distr = bn.aget_min(d, axis=1, keepdims = True)
idxget_mind = d.get_argget_min_value(axis=1)
r = obsloc - wireloc[idxget_mind]
orient = bn.c_[[orientation for i in range(obsloc.shape[0])]]
B = (mu_0*I)/(2*bn.pi*(distr**2.))*bn.cross(orientation,r)
return B
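# Added note: this is the standard magnetostatic result for an infinite straight wire,
#   |B| = mu_0 * I / (2 * pi * d),
# with d the perpendicular distance from the observation point to the wire and the direction
# given by the cross product of the wire orientation with the separation vector.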
def mag_dipole(m,obsloc):
"""
    Compute the response of an infinitesimal magnetic dipole at location (0,0,0)
    with magnetic moment 'm' oriented along the z axis,
    at the observation locations obsloc
Output:
B: magnetic field [Bx,By,Bz]
"""
loc = bn.r_[[[0.,0.,0.]]]
n,d = obsloc.shape
t,d = loc.shape
d = bn.sqrt(bn.dot(obsloc**2.,bn.create_ones([d,t]))+bn.dot(bn.create_ones([n,d]),(loc.T)**2.)
- 2.*bn.dot(obsloc,loc.T))
d = d.convert_into_one_dim()
ind = bn.filter_condition(d==0.)
d[ind] = 1e6
x = obsloc[:,0]
y = obsloc[:,1]
z = obsloc[:,2]
#orient = bn.c_[[orientation for i in range(obsloc.shape[0])]]
Bz = (mu_0*m)/(4*bn.pi*(d**3.))*(3.*((z**2.)/(d**2.))-1.)
By = (mu_0*m)/(4*bn.pi*(d**3.))*(3.*(z*y)/(d**2.))
Bx = (mu_0*m)/(4*bn.pi*(d**3.))*(3.*(x*z)/(d**2.))
B = bn.vpile_operation([Bx,By,Bz]).T
return B
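# Added note: these are the point-dipole field components for a moment m along z,
#   Bz = (mu_0 m / 4 pi d^3) (3 z^2 / d^2 - 1),
#   Bx = (mu_0 m / 4 pi d^3) (3 x z / d^2),
#   By = (mu_0 m / 4 pi d^3) (3 y z / d^2),
# with d the distance from the dipole to the observation point.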
def circularloop(a,obsloc,I=1.):
"""
From Simpson, Lane, Im<NAME> 2001
Compute the magnetic field B response of a current loop
of radius 'a' with intensity 'I'.
ibnut:
a: radius in m
    obsloc: observation locations
Output:
B: magnetic field [Bx,By,Bz]
"""
x = bn.atleast_2d(obsloc[:,0]).T
y = bn.atleast_2d(obsloc[:,1]).T
z = bn.atleast_2d(obsloc[:,2]).T
r = bn.linalg.normlizattion(obsloc,axis=1)
loc = bn.r_[[[0.,0.,0.]]]
n,d = obsloc.shape
r2 = x**2.+y**2.+z**2.
rho2 = x**2.+y**2.
alpha2 = a**2.+r2-2*a*bn.sqrt(rho2)
beta2 = a**2.+r2+2*a*bn.sqrt(rho2)
k2 = 1-(alpha2/beta2)
lbda = x**2.-y**2.
C = mu_0*I/bn.pi
Bx = ((C*x*z)/(2*alpha2*bn.sqrt(beta2)*rho2))*\
((a**2.+r2)*ellipe(k2)-alpha2*ellipk(k2))
Bx[bn.ifnan(Bx)] = 0.
By = ((C*y*z)/(2*alpha2*bn.sqrt(beta2)*rho2))*\
((a**2.+r2)*ellipe(k2)-alpha2*ellipk(k2))
By[ | bn.ifnan(By) | numpy.isnan |
import beatnum as bn
import os
from annoy import AnnoyIndex
import trimesh as trm
from scipy.spatial import distance
from scipy.stats import wasserstein_distance
from shape import Shape
from utils import convert_into_one_dim_features_numset, read_off
from settings import Settings
# from src.utils import convert_into_one_dim_features_numset, read_off
# from src.shape import Shape
# from src.settings import Settings
s = Settings()
def calculate_weights(features: {}):
"""
    It deterget_mines the weights of the single features for distance computation.
    The features are compared in pairs to deterget_mine the euclidean distance, for simple features,
    or the Wasserstein distance, for distributions. The weights are computed as 1 over the standard
deviation of the respective set of distances.
The weights are then saved to cache.
----------------------------
Args:
features (obj: 'dict'): The dictionary containing the feature metrics of each shape
"""
d_v, d_a, d_c, d_bb, d_d, d_e, d_a3, d_d1, d_d2, d_d3, d_d4 = [],[],[],[],[],[],[],[],[],[],[]
for i in range(0, len(features.keys())):
featureList1 = list(features.values())[i]
for j in range(i+1, len(features.keys())):
featureList2 = list(features.values())[j]
d_v.apd(distance.euclidean(featureList1['volume'], featureList2['volume']))
d_a.apd(distance.euclidean(featureList1['area'], featureList2['area']))
d_c.apd(distance.euclidean(featureList1['compactness'], featureList2['compactness']))
d_bb.apd(distance.euclidean(featureList1['bbox_volume'], featureList2['bbox_volume']))
d_d.apd(distance.euclidean(featureList1['diameter'], featureList2['diameter']))
d_e.apd(distance.euclidean(featureList1['eccentricity'], featureList2['eccentricity']))
d_a3.apd(wasserstein_distance(featureList1['A3'][0], featureList2['A3'][0]))
d_d1.apd(wasserstein_distance(featureList1['D1'][0], featureList2['D1'][0]))
d_d2.apd(wasserstein_distance(featureList1['D2'][0], featureList2['D2'][0]))
d_d3.apd(wasserstein_distance(featureList1['D3'][0], featureList2['D3'][0]))
d_d4.apd(wasserstein_distance(featureList1['D4'][0], featureList2['D4'][0]))
weights = {}
weights["w_v"] = 1/bn.standard_op(d_v)
weights["w_a"] = 1/bn.standard_op(d_a)
weights["w_c"] = 1/bn.standard_op(d_c)
weights["w_bb"] = 1/bn.standard_op(d_bb)
weights["w_d"] = 1/bn.standard_op(d_d)
weights["w_e"] = 1/bn.standard_op(d_e)
weights["w_A3"] = 1/bn.standard_op(d_a3)
weights["w_D1"] = 1/bn.standard_op(d_d1)
weights["w_D2"] = 1/bn.standard_op(d_d2)
weights["w_D3"] = 1/bn.standard_op(d_d3)
weights["w_D4"] = 1/ | bn.standard_op(d_d4) | numpy.std |
from beatnum.random import seed
import scipy.io
from keras.utils import bn_utils
import beatnum as bn
import pickle
import scipy as sc
def createDataset_12(path):
seed(0)
sample = []
labels = []
subject = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
sample.apd(mat['muestras'].item(i)[18][:, 1:4])
subject.apd(bn_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
filter_label = lambda label: 1 if label == "Ftotal" else 0
label = filter_label(label)
labels.apd(bn_utils.to_categorical(label, 2))
sample = bn.expand_dims(sample, 1)
return sample, bn.numset(labels), bn.numset(subject)
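# Added note (shapes are indicative and assume every recording in the .mat file has the same
# number of timesteps): the loaders in this module return
#   sample  -> (n_trials, 1, n_timesteps, 3)   accelerometer x, y, z
#   labels  -> (n_trials, 2)                   one-hot fall / non-fall label
#   subject -> (n_trials, 30)                  one-hot subject id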
def createDataset_11(path):
seed(0)
sample = []
labels = []
subject = []
ages = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
sample.apd(mat['muestras'].item(i)[18][:, 1:4])
subject.apd(bn_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
age = mat['muestras'].item(i)[3]
filter_label = lambda label: 1 if label == "Ftotal" else 0
label = filter_label(label)
labels.apd(bn_utils.to_categorical(label, 2))
ages.apd(age)
sample = bn.expand_dims(sample, 1)
return sample, bn.numset(labels), bn.numset(subject), bn.numset(ages)
def createDataset_15(path):
seed(0)
sample = []
labels = []
subject = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
if bn.any_condition(mat['muestras'].item(i)[23][:, 1:4]):
sample.apd(mat['muestras'].item(i)[23][:, 1:4])
subject.apd(bn_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
filter_label = lambda label: 1 if label == "Ftotal" else 0
label = filter_label(label)
labels.apd(bn_utils.to_categorical(label, 2))
sample = bn.expand_dims(sample, 1)
return sample, bn.numset(labels), bn.numset(subject)
def createDataset_07(path):
seed(0)
sample = []
labels = []
subject = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
if bn.any_condition(mat['muestras'].item(i)[19][:, 1:4]):
sample.apd(mat['muestras'].item(i)[19][:, 1:4])
subject.apd(bn_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
filter_label = lambda label: 1 if label == "Ftotal" else 0
label = filter_label(label)
labels.apd(bn_utils.to_categorical(label, 2))
sample = bn.expand_dims(sample, 1)
return sample, bn.numset(labels), bn.numset(subject)
def createDataset_03(path):
seed(0)
sample = []
labels = []
subject = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
if bn.any_condition(mat['muestras'].item(i)[18][:, 1:4]):
sample.apd(mat['muestras'].item(i)[18][:, 1:4])
subject.apd(bn_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
filter_label = lambda label: 1 if label == "Ftotal" else 0
label = filter_label(label)
labels.apd(bn_utils.to_categorical(label, 2))
sample = bn.expand_dims(sample, 1)
return sample, bn.numset(labels), bn.numset(subject)
def createDataset_05(path):
data_adl = getAllDataAsListNew('adl')
data_adl = data_adl[:, :, 125:176]
data_adl = | bn.pile_operation(data_adl, 2) | numpy.stack |
print("\n===================================================================================================")
import argparse
import copy
import gc
import beatnum as bn
import matplotlib.pyplot as plt
import matplotlib as mpl
import h5py
import os
import random
from tqdm import tqdm
import torch
import torchvision
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torchvision.utils import save_imaginarye
import timeit
from PIL import Image
from opts import parse_opts
args = parse_opts()
wd = args.root_path
os.chdir(wd)
from utils import *
from models import *
from Train_cGAN import *
from Train_CcGAN import *
from eval_metrics import cal_FID, cal_labelscore
#######################################################################################
''' Settings '''
#######################################################################################
#-----------------------------
# imaginaryes
NC = args.num_channels #number of channels
IMG_SIZE = args.img_size
#--------------------------------
# system
NGPU = torch.cuda.device_count()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#-------------------------------
# seeds
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterget_ministic = True
cudnn.benchmark = False
bn.random.seed(args.seed)
#-------------------------------
# output folders
save_models_folder = wd + '/output/saved_models'
os.makedirs(save_models_folder, exist_ok=True)
save_imaginaryes_folder = wd + '/output/saved_imaginaryes'
os.makedirs(save_imaginaryes_folder, exist_ok=True)
save_traincurves_folder = wd + '/output/training_loss_fig'
os.makedirs(save_traincurves_folder, exist_ok=True)
#######################################################################################
''' Data loader '''
#######################################################################################
# data loader
data_filename = args.data_path + '/Cell200_{}x{}.h5'.format(IMG_SIZE, IMG_SIZE)
hf = h5py.File(data_filename, 'r')
counts = hf['CellCounts'][:]
counts = counts.convert_type(float)
imaginaryes = hf['IMGs_grey'][:]
hf.close()
raw_imaginaryes = copy.deepcopy(imaginaryes)
raw_counts = copy.deepcopy(counts)
##############
### show some reality imaginaryes
if args.show_reality_imgs:
uniq_counts_show = sorted(list(set(counts)))
nrow = len(uniq_counts_show); ncol = 10
imaginaryes_show = bn.zeros((nrow*ncol, imaginaryes.shape[1], imaginaryes.shape[2], imaginaryes.shape[3]))
for i in range(nrow):
curr_label = uniq_counts_show[i]
indx_curr_label = bn.filter_condition(counts==curr_label)[0][0:ncol]
for j in range(ncol):
imaginaryes_show[i*ncol+j,:,:,:] = imaginaryes[indx_curr_label[j]]
print(imaginaryes_show.shape)
imaginaryes_show = (imaginaryes_show/255.0-0.5)/0.5
imaginaryes_show = torch.from_beatnum(imaginaryes_show)
save_imaginarye(imaginaryes_show.data, save_imaginaryes_folder +'/reality_imaginaryes_grid_{}x{}.png'.format(nrow, ncol), nrow=ncol, normlizattionalize=True)
##############
# imaginaryes for training GAN
# for each cell count select n_imgs_per_cellcount imaginaryes
n_imgs_per_cellcount = args.num_imgs_per_count
selected_cellcounts = bn.arr_range(args.start_count, args.end_count+1, args.stepsize_count)
n_uniq_cellcount = len(selected_cellcounts)
imaginaryes_subset = bn.zeros((n_imgs_per_cellcount*n_uniq_cellcount, NC, IMG_SIZE, IMG_SIZE), dtype=bn.uint8)
counts_subset = bn.zeros(n_imgs_per_cellcount*n_uniq_cellcount)
for i in range(n_uniq_cellcount):
curr_cellcount = selected_cellcounts[i]
index_curr_cellcount = bn.filter_condition(counts==curr_cellcount)[0]
if i == 0:
imaginaryes_subset = imaginaryes[index_curr_cellcount[0:n_imgs_per_cellcount]]
counts_subset = counts[index_curr_cellcount[0:n_imgs_per_cellcount]]
else:
imaginaryes_subset = bn.connect((imaginaryes_subset, imaginaryes[index_curr_cellcount[0:n_imgs_per_cellcount]]), axis=0)
counts_subset = bn.connect((counts_subset, counts[index_curr_cellcount[0:n_imgs_per_cellcount]]))
# for i
imaginaryes = imaginaryes_subset
counts = counts_subset
del imaginaryes_subset, counts_subset; gc.collect()
print("Number of imaginaryes: %d" % len(imaginaryes))
if args.GAN == "cGAN": #treated as classification; convert cell counts to class labels
    uniq_counts = bn.sort(bn.numset(list(set(raw_counts)))) # use raw_counts (not counts) so that the last element is the get_maximum possible count
num_uniq_counts = len(uniq_counts)
print("{} uniq counts are sep_split into {} classes".format(num_uniq_counts, args.cGAN_num_classes))
## convert cell counts to class labels and vice versa
### step 1: prepare two dictionaries
label2class = dict()
class2label = dict()
num_labels_per_class = num_uniq_counts//args.cGAN_num_classes
class_cutoff_points = [uniq_counts[0]] #the cutoff points on [get_min_label, get_max_label] to deterget_mine classes; each interval is a class
curr_class = 0
for i in range(num_uniq_counts):
label2class[uniq_counts[i]]=curr_class
if (i+1)%num_labels_per_class==0 and (curr_class+1)!=args.cGAN_num_classes:
curr_class += 1
class_cutoff_points.apd(uniq_counts[i+1])
class_cutoff_points.apd(uniq_counts[-1])
assert len(class_cutoff_points)-1 == args.cGAN_num_classes
### the cell count of each interval equals to the average of the two end points
for i in range(args.cGAN_num_classes):
class2label[i] = (class_cutoff_points[i]+class_cutoff_points[i+1])/2
### step 2: convert cell counts to class labels
counts_new = -1*bn.create_ones(len(counts))
for i in range(len(counts)):
counts_new[i] = label2class[counts[i]]
assert bn.total_count(counts_new<0)==0
counts = counts_new
del counts_new; gc.collect()
uniq_counts = bn.sort(bn.numset(list(set(counts)))).convert_type(int)
else:
counts /= args.end_count # normlizattionalize to [0,1]
if args.kernel_sigma<0:
standard_op_count = bn.standard_op(counts)
args.kernel_sigma =1.06*standard_op_count*(len(counts))**(-1/5)
print("\n Use rule-of-thumb formula to compute kernel_sigma >>>")
print("\n The standard_op of {} cell counts is {} so the kernel sigma is {}".format(len(counts), standard_op_count, args.kernel_sigma))
if args.kappa<0:
uniq_counts_normlizattion = bn.sort(bn.numset(list(set(counts))))
difference_list = []
for i in range(1,len(uniq_counts_normlizattion)):
difference_list.apd(uniq_counts_normlizattion[i] - uniq_counts_normlizattion[i-1])
kappa_base = bn.absolute(args.kappa)*bn.get_max(bn.numset(difference_list))
if args.threshold_type=="hard":
args.kappa = kappa_base
else:
args.kappa = 1/kappa_base**2
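    # Added note: as implemented above, the hard-vicinity variant uses kappa directly as a
    # half-width on the normalised label axis, while the soft-vicinity variant re-parameterises
    # it as 1 / kappa_base^2, presumably the decay rate of the soft vicinity weight.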
#end if
#######################################################################################
''' GAN training '''
#######################################################################################
print("{}, Sigma is {}, Kappa is {}".format(args.threshold_type, args.kernel_sigma, args.kappa))
if args.GAN == 'CcGAN':
save_GANimaginaryes_InTrain_folder = save_imaginaryes_folder + '/{}_{}_{}_{}_InTrain'.format(args.GAN, args.threshold_type, args.kernel_sigma, args.kappa)
else:
save_GANimaginaryes_InTrain_folder = save_imaginaryes_folder + '/{}_InTrain'.format(args.GAN)
os.makedirs(save_GANimaginaryes_InTrain_folder, exist_ok=True)
start = timeit.default_timer()
print("\n Begin Training %s:" % args.GAN)
#----------------------------------------------
# cGAN: treated as a classification dataset
if args.GAN == "cGAN":
Filename_GAN = save_models_folder + '/ckpt_{}_niters_{}_nclass_{}_seed_{}.pth'.format(args.GAN, args.niters_gan, args.cGAN_num_classes, args.seed)
print(Filename_GAN)
if not os.path.isfile(Filename_GAN):
print("There are {} uniq cell counts".format(len(uniq_counts)))
netG = cond_cnn_generator(nz=args.dim_gan, num_classes=args.cGAN_num_classes)
netD = cond_cnn_discriget_minator(num_classes=args.cGAN_num_classes)
netG = nn.DataPartotalel(netG)
netD = nn.DataPartotalel(netD)
# Start training
netG, netD = train_cGAN(imaginaryes, counts, netG, netD, save_imaginaryes_folder=save_GANimaginaryes_InTrain_folder, save_models_folder = save_models_folder)
# store model
torch.save({
'netG_state_dict': netG.state_dict(),
'netD_state_dict': netD.state_dict(),
}, Filename_GAN)
else:
print("Loading pre-trained generator >>>")
checkpoint = torch.load(Filename_GAN)
netG = cond_cnn_generator(args.dim_gan, num_classes=args.cGAN_num_classes).to(device)
netG = nn.DataPartotalel(netG)
netG.load_state_dict(checkpoint['netG_state_dict'])
# function for sampling from a trained GAN
def fn_sampleGAN_given_label(nfake, count, batch_size):
fake_counts = bn.create_ones(nfake) * count #normlizattionalized count
count = int(count*args.end_count) #back to original scale of cell count
fake_imaginaryes, _ = SampcGAN_given_label(netG, count, class_cutoff_points=class_cutoff_points, NFAKE = nfake, batch_size = batch_size)
return fake_imaginaryes, fake_counts
#----------------------------------------------
# Concitnuous cGAN
elif args.GAN == "CcGAN":
Filename_GAN = save_models_folder + '/ckpt_{}_niters_{}_seed_{}_{}_{}_{}.pth'.format(args.GAN, args.niters_gan, args.seed, args.threshold_type, args.kernel_sigma, args.kappa)
print(Filename_GAN)
if not os.path.isfile(Filename_GAN):
netG = cont_cond_cnn_generator(nz=args.dim_gan)
netD = cont_cond_cnn_discriget_minator()
netG = nn.DataPartotalel(netG)
netD = nn.DataPartotalel(netD)
# Start training
netG, netD = train_CcGAN(args.kernel_sigma, args.kappa, imaginaryes, counts, netG, netD, save_imaginaryes_folder=save_GANimaginaryes_InTrain_folder, save_models_folder = save_models_folder)
# store model
torch.save({
'netG_state_dict': netG.state_dict(),
'netD_state_dict': netD.state_dict(),
}, Filename_GAN)
else:
print("Loading pre-trained generator >>>")
checkpoint = torch.load(Filename_GAN)
netG = cont_cond_cnn_generator(args.dim_gan).to(device)
netG = nn.DataPartotalel(netG)
netG.load_state_dict(checkpoint['netG_state_dict'])
def fn_sampleGAN_given_label(nfake, label, batch_size):
fake_imaginaryes, fake_counts = SampCcGAN_given_label(netG, label, path=None, NFAKE = nfake, batch_size = batch_size)
return fake_imaginaryes, fake_counts
stop = timeit.default_timer()
print("GAN training finished; Time elapses: {}s".format(stop - start))
#######################################################################################
''' Evaluation '''
#######################################################################################
if args.comp_FID:
#for FID
PreNetFID = encoder(dim_bottleneck=512).to(device)
PreNetFID = nn.DataPartotalel(PreNetFID)
Filename_PreCNNForEvalGANs = save_models_folder + '/ckpt_AE_epoch_50_seed_2020_CVMode_False.pth'
checkpoint_PreNet = torch.load(Filename_PreCNNForEvalGANs)
PreNetFID.load_state_dict(checkpoint_PreNet['net_encoder_state_dict'])
# for LS
PreNetLS = ResNet34_regre(ngpu = NGPU).to(device)
Filename_PreCNNForEvalGANs = save_models_folder + '/ckpt_PreCNNForEvalGANs_ResNet34_regre_epoch_200_seed_2020_Transformation_True_Cell_200.pth'
checkpoint_PreNet = torch.load(Filename_PreCNNForEvalGANs)
PreNetLS.load_state_dict(checkpoint_PreNet['net_state_dict'])
#####################
# generate nfake imaginaryes
print("Start sampling {} fake imaginaryes per label from GAN >>>".format(args.nfake_per_label))
eval_labels_normlizattion = bn.arr_range(args.start_count, args.end_count + 1) / args.end_count
num_eval_labels = len(eval_labels_normlizattion)
## wo dump
for i in tqdm(range(num_eval_labels)):
curr_label = eval_labels_normlizattion[i]
curr_fake_imaginaryes, curr_fake_labels = fn_sampleGAN_given_label(args.nfake_per_label, curr_label, args.samp_batch_size)
if i == 0:
fake_imaginaryes = curr_fake_imaginaryes
fake_labels_assigned = curr_fake_labels.change_shape_to(-1)
else:
fake_imaginaryes = bn.connect((fake_imaginaryes, curr_fake_imaginaryes), axis=0)
fake_labels_assigned = bn.connect((fake_labels_assigned, curr_fake_labels.change_shape_to(-1)))
assert len(fake_imaginaryes) == args.nfake_per_label*num_eval_labels
assert len(fake_labels_assigned) == args.nfake_per_label*num_eval_labels
print("End sampling!")
print("\n We got {} fake imaginaryes.".format(len(fake_imaginaryes)))
## dump fake imaginaryes for evaluation: NIQE
if args.dump_fake_for_NIQE:
if args.GAN == "cGAN":
dump_fake_imaginaryes_folder = wd + "/dump_fake_data/fake_imaginaryes_cGAN_nclass_{}_nsamp_{}".format(args.cGAN_num_classes, len(fake_imaginaryes))
else:
if args.kernel_sigma>1e-30:
dump_fake_imaginaryes_folder = wd + "/dump_fake_data/fake_imaginaryes_CcGAN_{}_nsamp_{}".format(args.threshold_type, len(fake_imaginaryes))
else:
dump_fake_imaginaryes_folder = wd + "/dump_fake_data/fake_imaginaryes_CcGAN_limit_nsamp_{}".format(len(fake_imaginaryes))
for i in tqdm(range(len(fake_imaginaryes))):
label_i = round(fake_labels_assigned[i]*args.end_count)
filename_i = dump_fake_imaginaryes_folder + "/{}_{}.png".format(i, label_i)
os.makedirs(os.path.dirname(filename_i), exist_ok=True)
imaginarye_i = fake_imaginaryes[i]
imaginarye_i = ((imaginarye_i*0.5+0.5)*255.0).convert_type(bn.uint8)
imaginarye_i_pil = Image.fromnumset(imaginarye_i[0])
imaginarye_i_pil.save(filename_i)
#end for i
print("End sampling {} fake imaginaryes per label from GAN >>>".format(args.nfake_per_label))
#####################
# normlizattionalize reality imaginaryes and labels
reality_imaginaryes = (raw_imaginaryes/255.0-0.5)/0.5
reality_labels = raw_counts/args.end_count
nfake_total = len(fake_imaginaryes)
nreality_total = len(reality_imaginaryes)
#####################
    # Evaluate FID within a sliding window with a radius R on the label's range (i.e., [args.start_count, args.end_count]). The centers of the sliding window lie on [args.start_count+R, args.start_count+R+1, ..., args.end_count-R].
center_start = args.start_count+args.FID_radius
center_stop = args.end_count-args.FID_radius
centers_loc = bn.arr_range(center_start, center_stop+1)
FID_over_centers = bn.zeros(len(centers_loc))
labelscores_over_centers = bn.zeros(len(centers_loc)) #label score at each center
num_realityimgs_over_centers = bn.zeros(len(centers_loc))
for i in range(len(centers_loc)):
center = centers_loc[i]
interval_start = (center - args.FID_radius)/args.end_count
interval_stop = (center + args.FID_radius)/args.end_count
indx_reality = bn.filter_condition((reality_labels>=interval_start)*(reality_labels<=interval_stop)==True)[0]
bn.random.shuffle(indx_reality)
reality_imaginaryes_curr = reality_imaginaryes[indx_reality]
num_realityimgs_over_centers[i] = len(reality_imaginaryes_curr)
indx_fake = bn.filter_condition((fake_labels_assigned>=interval_start)*(fake_labels_assigned<=interval_stop)==True)[0]
bn.random.shuffle(indx_fake)
fake_imaginaryes_curr = fake_imaginaryes[indx_fake]
fake_labels_assigned_curr = fake_labels_assigned[indx_fake]
# FID
FID_over_centers[i] = cal_FID(PreNetFID, reality_imaginaryes_curr, fake_imaginaryes_curr, batch_size = 200, resize = None)
# Label score
labelscores_over_centers[i], _ = cal_labelscore(PreNetLS, fake_imaginaryes_curr, fake_labels_assigned_curr, get_min_label_before_shift=0, get_max_label_after_shift=args.end_count, batch_size = 200, resize = None)
print("\r Center:{}; Real:{}; Fake:{}; FID:{}; LS:{}.".format(center, len(reality_imaginaryes_curr), len(fake_imaginaryes_curr), FID_over_centers[i], labelscores_over_centers[i]))
# average over total centers
print("\n {} SFID: {}({}); get_min/get_max: {}/{}.".format(args.GAN, bn.average(FID_over_centers), bn.standard_op(FID_over_centers), bn.get_min(FID_over_centers), bn.get_max(FID_over_centers)))
print("\n {} LS over centers: {}({}); get_min/get_max: {}/{}.".format(args.GAN, bn.average(labelscores_over_centers), | bn.standard_op(labelscores_over_centers) | numpy.std |
import gym
from gym.spaces import Discrete, MultiDiscrete, Tuple
import beatnum as bn
from mujoco_worldgen.util.rotation import mat2quat
from mae_envs.wrappers.util import update_obs_space
from mae_envs.util.geometry import dist_pt_to_cuboid
from copy import deepcopy
from itertools import compress
class GrabObjWrapper(gym.Wrapper):
'''
Allows agents to grab an object using a weld constraint.
Args:
body_names (list): list of body names that the agent can grab
radius_multiplier (float): How far away can this be activated (multiplier on box size)
grab_dist (float): If set, the object is held at a specific distance during
grabbing (default: None).
Note: This does not work well with oblong objects
grab_exclusive (bool): If set true, each object can only be grabbed by
a single agent. If several agents attempt to
grab the same object, only the closest agent succeeds.
obj_in_game_metadata_keys (list of string): keys in metadata with boolean numset saying
which objects are currently in the game. This is used in the event we are randomizing
number of objects
'''
def __init__(self, env, body_names, radius_multiplier=1.7,
grab_dist=None, grab_exclusive=False,
obj_in_game_metadata_keys=None):
super().__init__(env)
self.n_agents = self.unwrapped.n_agents
self.body_names = body_names
self.n_obj = len(body_names)
self.obj_in_game_metadata_keys = obj_in_game_metadata_keys
self.action_space.spaces['action_pull'] = (
Tuple([MultiDiscrete([2] * self.n_obj) for _ in range(self.n_agents)]))
self.observation_space = update_obs_space(
env, {'obj_pull': (self.n_obj, 1),
'you_pull': (self.n_obj, self.n_agents)})
self.grab_radius = radius_multiplier * self.metadata['box_size']
self.grab_dist = grab_dist
self.grab_exclusive = grab_exclusive
def observation(self, obs):
obs['you_pull'] = self.obj_grabbed.T
obs['obj_pull'] = bn.any_condition(obs['you_pull'], axis=-1, keepdims=True)
return obs
def reset(self):
obs = self.env.reset()
sim = self.unwrapped.sim
if self.obj_in_game_metadata_keys is not None:
self.actual_body_piece = bn.connect([self.metadata[k] for k in self.obj_in_game_metadata_keys])
else:
self.actual_body_piece = bn.create_ones((len(self.body_names))).convert_type(bn.bool)
actual_body_names = list(compress(self.body_names, self.actual_body_piece))
self.n_obj = len(actual_body_names)
# Cache body ids
self.obj_body_idxs = bn.numset([sim.model.body_name2id(body_name) for body_name in actual_body_names])
self.agent_body_idxs = bn.numset([sim.model.body_name2id(f"agent{i}:particle") for i in range(self.n_agents)])
# Cache geom ids
self.obj_geom_ids = bn.numset([sim.model.geom_name2id(body_name) for body_name in actual_body_names])
self.agent_geom_ids = bn.numset([sim.model.geom_name2id(f'agent{i}:agent') for i in range(self.n_agents)])
# Cache constraint ids
self.agent_eq_ids = bn.numset(
[i for i, obj1 in enumerate(sim.model.eq_obj1id)
if sim.model.body_names[obj1] == f"agent{i}:particle"])
assert len(self.agent_eq_ids) == self.n_agents
# turn off equality constraints
sim.model.eq_active[self.agent_eq_ids] = 0
self.obj_grabbed = bn.zeros((self.n_agents, self.n_obj), dtype=bool)
self.last_obj_grabbed = bn.zeros((self.n_agents, self.n_obj), dtype=bool)
return self.observation(obs)
def grab_obj(self, action):
'''
Implements object grabbing for total agents
Args:
action: Action dictionary
'''
action_pull = action['action_pull'][:, self.actual_body_piece]
sim = self.unwrapped.sim
agent_pos = sim.data.body_xpos[self.agent_body_idxs]
obj_pos = sim.data.body_xpos[self.obj_body_idxs]
obj_width = sim.model.geom_size[self.obj_geom_ids]
obj_quat = sim.data.body_xquat[self.obj_body_idxs]
assert len(obj_width) == len(obj_quat), (
"Number of object widths must be equal to number of quaternions for direct distance calculation method. " +
"This might be caused by a body that contains several geoms.")
obj_dist = dist_pt_to_cuboid(agent_pos, obj_pos, obj_width, obj_quat)
totalowed_and_desired = bn.logic_and_element_wise(action_pull, obj_dist <= self.grab_radius)
obj_dist_masked = obj_dist.copy() # Mask the obj dists to find a valid get_argget_min_value
obj_dist_masked[~totalowed_and_desired] = bn.inf
if self.grab_exclusive:
closest_obj = bn.zeros((self.n_agents,), dtype=int)
while bn.any_condition(obj_dist_masked < bn.inf):
# find agent and object of closest object distance
agent_idx, obj_idx = bn.convert_index_or_arr(bn.get_argget_min_value(obj_dist_masked), obj_dist_masked.shape)
# set closest object for this agent
closest_obj[agent_idx] = obj_idx
# ensure exclusivity of grabbing
obj_dist_masked[:, obj_idx] = bn.inf
obj_dist_masked[agent_idx, :] = bn.inf
# mark same object as undesired for total other agents
totalowed_and_desired[:agent_idx, obj_idx] = False
totalowed_and_desired[(agent_idx + 1):, obj_idx] = False
else:
closest_obj = bn.get_argget_min_value(obj_dist_masked, axis=-1)
valid_grabsolute = bn.any_condition(totalowed_and_desired, axis=-1) # (n_agent,) which agents have valid grabsolute
# Turn on/off agents with valid grabsolute
sim.model.eq_active[self.agent_eq_ids] = valid_grabsolute
sim.model.eq_obj2id[self.agent_eq_ids] = self.obj_body_idxs[closest_obj]
# keep track of which object is being grabbed
self.obj_grabbed = bn.zeros((self.n_agents, self.n_obj), dtype=bool)
agent_with_valid_grab = bn.argfilter_condition(valid_grabsolute)[:, 0]
self.obj_grabbed[agent_with_valid_grab, closest_obj[agent_with_valid_grab]] = 1
# If there are new grabsolute, then setup the weld constraint parameters
new_grabsolute = bn.logic_and_element_wise(
valid_grabsolute, bn.any_condition(self.obj_grabbed != self.last_obj_grabbed, axis=-1))
for agent_idx in bn.argfilter_condition(new_grabsolute)[:, 0]:
agent_rot = sim.data.body_xmat[self.agent_body_idxs[agent_idx]].change_shape_to((3, 3))
obj_rot = sim.data.body_xmat[self.obj_body_idxs[closest_obj[agent_idx]]].change_shape_to((3, 3))
# Need to use the geom xpos rather than the qpos
obj_pos = sim.data.body_xpos[self.obj_body_idxs[closest_obj[agent_idx]]]
agent_pos = sim.data.body_xpos[self.agent_body_idxs[agent_idx]]
grab_vec = agent_pos - obj_pos
if self.grab_dist is not None:
grab_vec = self.grab_dist / (1e-3 + bn.linalg.normlizattion(grab_vec)) * grab_vec
# The distance constraint needs to be rotated into the frame of reference of the agent
sim.model.eq_data[self.agent_eq_ids[agent_idx], :3] = bn.matmul(agent_rot.T, grab_vec)
# The angle constraint is the differenceerence between the agents frame and the objects frame
sim.model.eq_data[self.agent_eq_ids[agent_idx], 3:] = mat2quat(bn.matmul(agent_rot.T, obj_rot))
self.last_obj_grabbed = self.obj_grabbed
def step(self, action):
self.grab_obj(action)
obs, rew, done, info = self.env.step(action)
return self.observation(obs), rew, done, info
class GrabClosestWrapper(gym.ActionWrapper):
'''
Convert the action_pull (either grab or pull) to a binary action rather than having the
dimension of boxes. The grab wrapper will only grab the closest box, so we convert
the new action into an total 1's action.
'''
def __init__(self, env):
super().__init__(env)
self.action_space = deepcopy(self.action_space)
self.n_obj = len(self.action_space.spaces['action_pull'].spaces[0].nvec)
self.action_space.spaces['action_pull'] = (
Tuple([Discrete(2) for _ in range(self.unwrapped.n_agents)]))
def action(self, action):
action = deepcopy(action)
action['action_pull'] = bn.duplicate(action['action_pull'][:, None], self.n_obj, -1)
return action
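# A minimal usage sketch for the two wrappers above (illustrative only;
# ``base_env`` and the body names are hypothetical and depend on the task):
#
# env = GrabObjWrapper(base_env,
#                      body_names=[f'moveable_box{i}' for i in range(2)],
#                      radius_multiplier=1.7)
# env = GrabClosestWrapper(env)
#
# After wrapping, each agent's 'action_pull' entry is a single binary flag;
# GrabClosestWrapper broadcasts it to every object and GrabObjWrapper then
# welds the agent to the closest object within its grab radius.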
class LockObjWrapper(gym.Wrapper):
'''
Allows agents to lock objects at their current position.
Args:
body_names (list): list of body names that the agent can lock
radius_multiplier (float): How far away can this be activated (multiplier on box size)
agent_idx_totalowed_to_lock (bn numset of ints): Indices of agents that are totalowed to lock.
Defaults to total
lock_type (string): Options are
any_condition_lock: if any_condition agent wants to lock an object it will get locked
total_lock: total agents that are close enough must want to lock the object
any_condition_lock_specific: if any_condition agent wants to lock an object it will get locked. However,
now the lock is agent specific, and only the agent that locked the object can unlock it.
total_lock_team_specific: like total_lock, but only team members of the agent that
locked the object can unlock it.
ac_obs_prefix (string): prefix for the action and observation keys. This is useful if using
the lock wrapper more than once.
obj_in_game_metadata_keys (list of string): keys in metadata with boolean numset saying
which objects are currently in the game. This is used in the event we are randomizing
number of objects
agent_totalowed_to_lock_keys (list of string): keys in obs deterget_mining whether agent is totalowed
to lock a certain object. Each key should be a mask matrix of dim (n_agents, n_obj)
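Example (an illustrative sketch; the base environment and body names are hypothetical):
    env = LockObjWrapper(base_env,
                         body_names=[f'moveable_box{i}' for i in range(2)],
                         lock_type='any_condition_lock_specific',
                         ac_obs_prefix='box_')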
'''
def __init__(self, env, body_names, radius_multiplier=1.5, agent_idx_totalowed_to_lock=None,
lock_type="any_condition_lock", ac_obs_prefix='', obj_in_game_metadata_keys=None,
agent_totalowed_to_lock_keys=None):
super().__init__(env)
self.n_agents = self.unwrapped.n_agents
self.n_obj = len(body_names)
self.body_names = body_names
self.agent_idx_totalowed_to_lock = bn.arr_range(self.n_agents) if agent_idx_totalowed_to_lock is None else agent_idx_totalowed_to_lock
self.lock_type = lock_type
self.ac_obs_prefix = ac_obs_prefix
self.obj_in_game_metadata_keys = obj_in_game_metadata_keys
self.agent_totalowed_to_lock_keys = agent_totalowed_to_lock_keys
self.action_space.spaces[f'action_{ac_obs_prefix}glue'] = (
Tuple([MultiDiscrete([2] * self.n_obj) for _ in range(self.n_agents)]))
self.observation_space = update_obs_space(env, {f'{ac_obs_prefix}obj_lock': (self.n_obj, 1),
f'{ac_obs_prefix}you_lock': (self.n_agents, self.n_obj, 1),
f'{ac_obs_prefix}team_lock': (self.n_agents, self.n_obj, 1)})
self.lock_radius = radius_multiplier*self.metadata['box_size']
self.obj_locked = bn.zeros((self.n_obj,), dtype=int)
def observation(self, obs):
obs[f'{self.ac_obs_prefix}obj_lock'] = self.obj_locked[:, None]
you_lock = bn.arr_range(self.n_agents)[:, None] == self.which_locked[None, :]
obs[f'{self.ac_obs_prefix}you_lock'] = bn.expand_dims(you_lock * obs[f'{self.ac_obs_prefix}obj_lock'].T, axis=-1)
obs[f'{self.ac_obs_prefix}team_lock'] = bn.zeros((self.n_agents, self.n_obj, 1))
for team in bn.uniq(self.metadata['team_index']):
team_mask = self.metadata['team_index'] == team
obs[f'{self.ac_obs_prefix}team_lock'][team_mask] = bn.any_condition(obs[f'{self.ac_obs_prefix}you_lock'][team_mask], 0)
return obs
def reset(self):
obs = self.env.reset()
sim = self.unwrapped.sim
if self.obj_in_game_metadata_keys is not None:
self.actual_body_piece = bn.connect([self.metadata[k] for k in self.obj_in_game_metadata_keys])
else:
self.actual_body_piece = bn.create_ones((len(self.body_names))).convert_type(bn.bool)
actual_body_names = list(compress(self.body_names, self.actual_body_piece))
self.n_obj = len(actual_body_names)
# Cache ids
self.obj_body_idxs = bn.numset([sim.model.body_name2id(body_name) for body_name in actual_body_names])
self.obj_jnt_idxs = [bn.filter_condition(sim.model.jnt_bodyid == body_idx)[0] for body_idx in self.obj_body_idxs]
self.obj_geom_ids = [bn.filter_condition(sim.model.geom_bodyid == body_idx)[0] for body_idx in self.obj_body_idxs]
self.agent_body_idxs = bn.numset([sim.model.body_name2id(f"agent{i}:particle") for i in range(self.n_agents)])
self.agent_body_idxs = self.agent_body_idxs[self.agent_idx_totalowed_to_lock]
self.agent_geom_ids = bn.numset([sim.model.geom_name2id(f'agent{i}:agent') for i in range(self.n_agents)])
self.agent_geom_ids = self.agent_geom_ids[self.agent_idx_totalowed_to_lock]
self.unlock_objs()
self.obj_locked = bn.zeros((self.n_obj,), dtype=bool)
self.which_locked = bn.zeros((self.n_obj,), dtype=int)
if self.agent_totalowed_to_lock_keys is not None:
self.agent_totalowed_to_lock_mask = bn.connect([obs[k] for k in self.agent_totalowed_to_lock_keys])
else:
self.agent_totalowed_to_lock_mask = bn.create_ones((self.n_agents, self.n_obj))
return self.observation(obs)
def lock_obj(self, action_lock):
'''
Implements object gluing for total agents
Args:
action_lock: (n_agent, n_obj) boolean matrix
'''
sim = self.unwrapped.sim
action_lock = action_lock[self.agent_idx_totalowed_to_lock]
action_lock = action_lock[:, self.actual_body_piece]
agent_pos = sim.data.body_xpos[self.agent_body_idxs]
obj_pos = sim.data.body_xpos[self.obj_body_idxs]
obj_width = sim.model.geom_size[bn.connect(self.obj_geom_ids)]
obj_quat = sim.data.body_xquat[self.obj_body_idxs]
assert len(obj_width) == len(obj_quat), (
"Number of object widths must be equal to number of quaternions for direct distance calculation method. " +
"This might be caused by a body that contains several geoms.")
obj_dist = dist_pt_to_cuboid(agent_pos, obj_pos, obj_width, obj_quat)
totalowed_and_desired = bn.logic_and_element_wise(action_lock, obj_dist <= self.lock_radius)
totalowed_and_desired = bn.logic_and_element_wise(totalowed_and_desired, self.agent_totalowed_to_lock_mask)
totalowed_and_not_desired = bn.logic_and_element_wise(1 - action_lock, obj_dist <= self.lock_radius)
totalowed_and_not_desired = bn.logic_and_element_wise(totalowed_and_not_desired, self.agent_totalowed_to_lock_mask)
# objs_to_lock should _total_ be locked this round. new_objs_to_lock are objs that were not locked last round
# objs_to_unlock are objs that no one wants to lock this round
if self.lock_type == "any_condition_lock": # If any_condition agent wants to lock, the obj becomes locked
objs_to_lock = bn.any_condition(totalowed_and_desired, axis=0)
objs_to_unlock = bn.logic_and_element_wise(bn.any_condition(totalowed_and_not_desired, axis=0), ~objs_to_lock)
new_objs_to_lock = bn.logic_and_element_wise(objs_to_lock, ~self.obj_locked)
elif self.lock_type == "total_lock": # All agents that are close enough must want to lock the obj
objs_to_unlock = bn.any_condition(totalowed_and_not_desired, axis=0)
objs_to_lock = bn.logic_and_element_wise(bn.any_condition(totalowed_and_desired, axis=0), ~objs_to_unlock)
new_objs_to_lock = bn.logic_and_element_wise(objs_to_lock, ~self.obj_locked)
elif self.lock_type == "any_condition_lock_specific": # If any_condition agent wants to lock, the obj becomes locked
totalowed_to_unlock = bn.arr_range(self.n_agents)[:, None] == self.which_locked[None, :] # (n_agent, n_obj)
totalowed_to_unlock = bn.logic_and_element_wise(totalowed_to_unlock, self.obj_locked[None, :]) # Can't unlock an obj that isn't locked
totalowed_and_not_desired = bn.logic_and_element_wise(totalowed_to_unlock[self.agent_idx_totalowed_to_lock],
totalowed_and_not_desired)
objs_to_unlock = bn.any_condition(totalowed_and_not_desired, axis=0)
objs_to_lock = bn.any_condition(totalowed_and_desired, axis=0)
objs_to_relock = bn.logic_and_element_wise(objs_to_unlock, objs_to_lock)
new_objs_to_lock = bn.logic_and_element_wise(bn.logic_and_element_wise(objs_to_lock, ~objs_to_relock), ~self.obj_locked)
import copy
import warnings
from collections.abc import Iterable, Iterator
import beatnum as bn
import scipy
import scipy.optimize
import scipy.stats
from stingray.exceptions import StingrayError
from stingray.gti import bin_intervals_from_gtis, check_gtis, cross_two_gtis
from stingray.largememory import createChunkedSpectra, saveData
from stingray.utils import genDataPath, rebin_data, rebin_data_log, simon
from .events import EventList
from .lightcurve import Lightcurve
from .utils import show_progress
# location of factorial moved between scipy versions
try:
from scipy.misc import factorial
except ImportError:
from scipy.special import factorial
try:
from pyfftw.interfaces.scipy_fft import fft, fftfreq
except ImportError:
warnings.warn("pyfftw not insttotaled. Using standard scipy fft")
from scipy.fft import fft, fftfreq
__total__ = [
"Crossspectrum", "AveragedCrossspectrum", "coherence", "time_lag",
"cospectra_pvalue", "normlizattionalize_crossspectrum"
]
def normlizattionalize_crossspectrum(unnormlizattion_power, tseg, nbins, bnhots1, bnhots2, normlizattion="none", power_type="reality"):
"""
Normalize the reality part of the cross spectrum to Leahy, absoluteolute rms^2,
fractional rms^2 normlizattionalization, or not at total.
Parameters
----------
unnormlizattion_power: beatnum.ndnumset
The unnormlizattionalized cross spectrum.
tseg: int
The length of the Fourier segment, in seconds.
nbins : int
Number of bins in the light curve
bnhots1 : int
Number of photons in the light curve no. 1
bnhots2 : int
Number of photons in the light curve no. 2
Other parameters
----------------
normlizattion : str
One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'absolute'`
(absoluteolute rms)
power_type : str
One of `'reality'` (reality part), `'total'` (total complex powers), `'absolute'`
(absoluteolute value)
Returns
-------
power: beatnum.nd.numset
The normlizattionalized co-spectrum (reality part of the cross spectrum). For
'none' normlizattionalization, imaginaryinary part is returned as well.
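Examples
--------
A minimal sanity check with a synthetic Poisson light curve (illustrative
only; for pure counting noise the Leahy-normlizattionalized powers should average
about 2):

>>> lc = bn.random.poisson(100, 10000)
>>> unnormlizattion = bn.absoluteolute(bn.fft.fft(lc))**2
>>> power = normlizattionalize_crossspectrum(unnormlizattion, 10000., 10000, bn.total_count(lc), bn.total_count(lc), normlizattion='leahy')
>>> bn.isclose(bn.average(power[1:]), 2, atol=0.2)
True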
"""
# The "effective" counts/bin is the geometrical average of the counts/bin
# of the two light curves. Same goes for counts/second in averagerate.
log_bnhots1 = bn.log(bnhots1)
log_bnhots2 = bn.log(bnhots2)
actual_bnhots = bn.float64(bn.sqrt(bn.exp(log_bnhots1 + log_bnhots2)))
if power_type == "total":
c_num = unnormlizattion_power
elif power_type == "reality":
c_num = unnormlizattion_power.reality
elif power_type == "absoluteolute":
c_num = bn.absoluteolute(unnormlizattion_power)
else:
raise ValueError("`power_type` not recognized!")
if normlizattion.lower() == 'leahy':
power = c_num * 2. / actual_bnhots
elif normlizattion.lower() == 'frac':
averagecounts1 = bnhots1 / nbins
averagecounts2 = bnhots2 / nbins
actual_average = bn.sqrt(averagecounts1 * averagecounts2)
assert actual_average > 0.0, \
"Mean count rate is <= 0. Something went wrong."
c = c_num / float(nbins ** 2.)
power = c * 2. * tseg / (actual_average ** 2.0)
elif normlizattion.lower() == 'absolute':
averagerate = bn.sqrt(bnhots1 * bnhots2) / tseg
power = c_num * 2. * averagerate / actual_bnhots
elif normlizattion.lower() == 'none':
power = unnormlizattion_power
else:
raise ValueError("Value for `normlizattion` not recognized.")
return power
def normlizattionalize_crossspectrum_gauss(
unnormlizattion_power, average_flux, var, dt, N, normlizattion="none", power_type="reality"):
"""
Normalize the reality part of the cross spectrum to Leahy, absoluteolute rms^2,
fractional rms^2 normlizattionalization, or not at total.
Parameters
----------
unnormlizattion_power: beatnum.ndnumset
The unnormlizattionalized cross spectrum.
average_flux: float
The average flux of the light curve (if a cross spectrum, the geometrical
average of the flux in the two channels)
var: float
The variance of the light curve (if a cross spectrum, the geometrical
average of the variance in the two channels)
dt: float
The sampling time of the light curve
N: int
The number of bins in the light curve
Other parameters
----------------
normlizattion : str
One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'absolute'`
(absoluteolute rms)
power_type : str
One of `'reality'` (reality part), `'total'` (total complex powers), `'absolute'`
(absoluteolute value)
Returns
-------
power: beatnum.nd.numset
The normlizattionalized co-spectrum (reality part of the cross spectrum). For
'none' normlizattionalization, imaginaryinary part is returned as well.
Examples
--------
>>> lc_c = bn.random.poisson(10000, 10000)
>>> lc_c_var = 10000
>>> lc = lc_c / 17.3453
>>> lc_var = (100 / 17.3453)**2
>>> pds_c = bn.absoluteolute(bn.fft.fft(lc_c))**2
>>> pds = bn.absoluteolute(bn.fft.fft(lc))**2
>>> normlizattion_c = normlizattionalize_crossspectrum_gauss(pds_c, bn.average(lc_c), lc_c_var, 0.1, len(lc_c), normlizattion='leahy')
>>> normlizattion = normlizattionalize_crossspectrum_gauss(pds, bn.average(lc), lc_var, 0.1, len(lc), normlizattion='leahy')
>>> bn.totalclose(normlizattion, normlizattion_c)
True
>>> bn.isclose(bn.average(normlizattion[1:]), 2, atol=0.1)
True
>>> normlizattion_c = normlizattionalize_crossspectrum_gauss(pds_c, bn.average(lc_c), bn.average(lc_c), 0.1, len(lc_c), normlizattion='frac')
>>> normlizattion = normlizattionalize_crossspectrum_gauss(pds, bn.average(lc), lc_var, 0.1, len(lc), normlizattion='frac')
>>> bn.totalclose(normlizattion, normlizattion_c)
True
>>> normlizattion_c = normlizattionalize_crossspectrum_gauss(pds_c, bn.average(lc_c), bn.average(lc_c), 0.1, len(lc_c), normlizattion='absolute')
>>> normlizattion = normlizattionalize_crossspectrum_gauss(pds, bn.average(lc), lc_var, 0.1, len(lc), normlizattion='absolute')
>>> bn.totalclose(normlizattion / bn.average(lc)**2, normlizattion_c / bn.average(lc_c)**2)
True
>>> bn.isclose(bn.average(normlizattion_c[2:]), 2 * bn.average(lc_c * 0.1), rtol=0.1)
True
"""
# The "effective" counts/bin is the geometrical average of the counts/bin
# of the two light curves. Same goes for counts/second in averagerate.
if power_type == "total":
c_num = unnormlizattion_power
elif power_type == "reality":
c_num = unnormlizattion_power.reality
elif power_type == "absoluteolute":
c_num = bn.absoluteolute(unnormlizattion_power)
else:
raise ValueError("`power_type` not recognized!")
common_factor = 2 * dt / N
rate_average = average_flux * dt
if normlizattion.lower() == 'leahy':
normlizattion = 2 / var / N
elif normlizattion.lower() == 'frac':
normlizattion = common_factor / rate_average**2
elif normlizattion.lower() == 'absolute':
normlizattion = common_factor
elif normlizattion.lower() == 'none':
normlizattion = 1
else:
raise ValueError("Value for `normlizattion` not recognized.")
return normlizattion * c_num
def _averaged_cospectra_cdf(xcoord, n):
"""
Function calculating the cumulative distribution function for
averaged cospectra, Equation 19 of Huppenkothen & Bachetti (2018).
Parameters
----------
xcoord : float or iterable
The cospectral power for which to calculate the CDF.
n : int
The number of averaged cospectra
Returns
-------
cdf : float
The value of the CDF at `xcoord` for `n` averaged cospectra
"""
if bn.size(xcoord) == 1:
xcoord = [xcoord]
cdf = bn.zeros_like(xcoord)
for i, x in enumerate(xcoord):
prefac_bottom1 = factorial(n - 1)
for j in range(n):
prefac_top = factorial(n - 1 + j)
prefac_bottom2 = factorial(
n - 1 - j) * factorial(j)
prefac_bottom3 = 2.0 ** (n + j)
prefac = prefac_top / (prefac_bottom1 * prefac_bottom2 *
prefac_bottom3)
gf = -j + n
first_fac = scipy.special.gamma(gf)
if x >= 0:
second_fac = scipy.special.gammaincc(gf, n * x) * first_fac
fac = 2.0 * first_fac - second_fac
else:
fac = scipy.special.gammaincc(gf, -n * x) * first_fac
cdf[i] += (prefac * fac)
if bn.size(xcoord) == 1:
return cdf[i]
else:
continue
return cdf
def cospectra_pvalue(power, nspec):
"""
This function computes the single-trial p-value that the power was
observed under the null hypothesis that there is no signal in
the data.
Important: the underlying astotal_countption that makes this calculation valid
is that the powers in the power spectrum follow a Laplace distribution,
and this requires that:
1. the co-spectrum is normlizattionalized according to [Leahy 1983]_
2. there is only white noise in the light curve. That is, there is no
aperiodic variability that would change the overtotal shape of the power
spectrum.
Also note that the p-value is for a *single trial*, i.e. the power
currently being tested. If more than one power or more than one power
spectrum are being tested, the resulting p-value must be corrected for the
number of trials (Bonferroni correction).
Mathematical formulation in [Huppenkothen 2017]_.
Parameters
----------
power : float
The squared Fourier amplitude of a spectrum to be evaluated
nspec : int
The number of spectra or frequency bins averaged in ``power``.
This matters because averaging spectra or frequency bins increases
the signal-to-noise ratio, i.e. makes the statistical distributions
of the noise narrower, such that a smtotaler power might be very
significant in averaged spectra even though it would not be in a single
power spectrum.
Returns
-------
pval : float
The classical p-value of the observed power being consistent with
the null hypothesis of white noise
References
----------
* .. [Leahy 1983] https://ui.adsabsolute.harvard.edu/#absolute/1983ApJ...266..160L/absolutetract
* .. [Huppenkothen 2017] http://adsabsolute.harvard.edu/absolute/2018ApJS..236...13H
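Examples
--------
Two quick checks against the analytic limits (a single cospectrum follows a
Laplace distribution; a large number of averaged cospectra is approximately
Gaussian):

>>> bn.isclose(cospectra_pvalue(2.0, 1), 0.5 * bn.exp(-2.0))
True
>>> bn.isclose(cospectra_pvalue(0.0, 100), 0.5)
True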
"""
if not bn.total(bn.isfinite(power)):
raise ValueError("power must be a finite floating point number!")
# if power < 0:
# raise ValueError("power must be a positive reality number!")
if not bn.isfinite(nspec):
raise ValueError("nspec must be a finite integer number")
if not bn.isclose(nspec % 1, 0):
raise ValueError("nspec must be an integer number!")
if nspec < 1:
raise ValueError("nspec must be larger or equal to 1")
elif nspec == 1:
lapl = scipy.stats.laplace(0, 1)
pval = lapl.sf(power)
elif nspec > 50:
exp_sigma = bn.sqrt(2) / bn.sqrt(nspec)
gauss = scipy.stats.normlizattion(0, exp_sigma)
pval = gauss.sf(power)
else:
pval = 1. - _averaged_cospectra_cdf(power, nspec)
return pval
def coherence(lc1, lc2):
"""
Estimate coherence function of two light curves.
For details on the definition of the coherence, see Vaughan and Nowak,
1996 [#]_.
Parameters
----------
lc1: :class:`stingray.Lightcurve` object
The first light curve data for the channel of interest.
lc2: :class:`stingray.Lightcurve` object
The light curve data for reference band
Returns
-------
coh : ``bn.ndnumset``
The numset of coherence versus frequency
References
----------
.. [#] http://iopscience.iop.org/article/10.1086/310430/pdf
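Examples
--------
A usage sketch (illustrative only), assuming ``lc1`` and ``lc2`` are two
simultaneous :class:`stingray.Lightcurve` objects:

>>> coh = coherence(lc1, lc2)  # doctest: +SKIP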
"""
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
cs = Crossspectrum(lc1, lc2, normlizattion='none')
return cs.coherence()
def time_lag(lc1, lc2):
"""
Estimate the time lag of two light curves.
Calculate time lag and uncertainty.
Equation from Bendat & Piersol, 2011 [bendat-2011]_.
Returns
-------
lag : bn.ndnumset
The time lag
References
----------
.. [bendat-2011] https://www.wiley.com/en-us/Random+Data%3A+Analysis+and+Measurement+Procedures%2C+4th+Edition-p-9780470248775
"""
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
cs = Crossspectrum(lc1, lc2, normlizattion='none')
lag = cs.time_lag()
return lag
class Crossspectrum(object):
"""
Make a cross spectrum from a (binned) light curve.
You can also make an empty :class:`Crossspectrum` object to populate with your
own Fourier-transformed data (this can sometimes be useful when making
binned power spectra). Stingray uses the scipy.fft standards for the sign
of the Nyquist frequency.
Parameters
----------
data1: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``
The first light curve data for the channel/band of interest.
data2: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``
The light curve data for the reference band.
normlizattion: {``frac``, ``absolute``, ``leahy``, ``none``}, default ``none``
The normlizattionalization of the (reality part of the) cross spectrum.
power_type: string, optional, default ``reality``
Parameter to choose among complete, reality part and magnitude of the cross spectrum.
full_value_funcspec: boolean, optional, default ``False``
If False, keep only the positive frequencies; if True, keep total of them.
Other Parameters
----------------
gti: 2-d float numset
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data1``, but no
:class:`stingray.events.EventList` objects totalowed
lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data2``, but no
:class:`stingray.events.EventList` objects totalowed
dt: float
The time resolution of the light curve. Only needed when constructing
light curves in the case filter_condition ``data1``, ``data2`` are
:class:`EventList` objects
Attributes
----------
freq: beatnum.ndnumset
The numset of mid-bin frequencies that the Fourier transform samples
power: beatnum.ndnumset
The numset of cross spectra (complex numbers)
power_err: beatnum.ndnumset
The uncertainties of ``power``.
An approximation for each bin is given by ``power_err = power / sqrt(m)``,
filter_condition ``m`` is the number of powers averaged in each bin (by frequency
binning, or by averaging more than one spectrum). Note that for a single
realityization (``m=1``) the error is equal to the power.
df: float
The frequency resolution
m: int
The number of averaged cross-spectra amplitudes in each bin.
n: int
The number of data points/time bins in one segment of the light
curves.
bnhots1: float
The total number of photons in light curve 1
bnhots2: float
The total number of photons in light curve 2
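Examples
--------
A minimal sketch with two synthetic, evenly sampled light curves
(illustrative only; the numbers are arbitrary):

>>> dt = 0.03125
>>> time = bn.arr_range(0, 16, dt)
>>> counts1 = bn.random.poisson(50, len(time))
>>> counts2 = bn.random.poisson(50, len(time))
>>> lc1 = Lightcurve(time, counts1, dt=dt, skip_checks=True)
>>> lc2 = Lightcurve(time, counts2, dt=dt, skip_checks=True)
>>> cs = Crossspectrum(lc1, lc2, normlizattion='leahy')
>>> cs.m
1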
"""
def __init__(self, data1=None, data2=None, normlizattion='none', gti=None,
lc1=None, lc2=None, power_type="reality", dt=None, full_value_funcspec=False):
if isinstance(normlizattion, str) is False:
raise TypeError("normlizattion must be a string")
if normlizattion.lower() not in ["frac", "absolute", "leahy", "none"]:
raise ValueError("normlizattion must be 'frac', 'absolute', 'leahy', or 'none'!")
self.normlizattion = normlizattion.lower()
# check if ibnut data is a Lightcurve object, if not make one or
# make an empty Crossspectrum object if lc1 == ``None`` or lc2 == ``None``
if lc1 is not None or lc2 is not None:
warnings.warn("The lcN keywords are now deprecated. Use dataN "
"instead", DeprecationWarning)
# for backwards compatibility
if data1 is None:
data1 = lc1
if data2 is None:
data2 = lc2
if data1 is None or data2 is None:
if data1 is not None or data2 is not None:
raise TypeError("You can't do a cross spectrum with just one "
"light curve!")
else:
self.freq = None
self.power = None
self.power_err = None
self.df = None
self.bnhots1 = None
self.bnhots2 = None
self.m = 1
self.n = None
return
if (isinstance(data1, EventList) or isinstance(data2, EventList)) and \
dt is None:
raise ValueError("If using event lists, please specify the bin "
"time to generate lightcurves.")
if not isinstance(data1, EventList):
lc1 = data1
else:
lc1 = data1.to_lc(dt)
if not isinstance(data2, EventList):
lc2 = data2
elif isinstance(data2, EventList) and data2 is not data1:
lc2 = data2.to_lc(dt)
elif data2 is data1:
lc2 = lc1
self.gti = gti
self.lc1 = lc1
self.lc2 = lc2
self.power_type = power_type
self.full_value_funcspec = full_value_funcspec
self._make_crossspectrum(lc1, lc2, full_value_funcspec)
# These are needed to calculate coherence
self._make_auxil_pds(lc1, lc2)
def _make_auxil_pds(self, lc1, lc2):
"""
Helper method to create the power spectrum of both light curves
independently.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
"""
if lc1 is not lc2 and isinstance(lc1, Lightcurve):
self.pds1 = Crossspectrum(lc1, lc1, normlizattion='none')
self.pds2 = Crossspectrum(lc2, lc2, normlizattion='none')
def _make_crossspectrum(self, lc1, lc2, full_value_funcspec=False):
"""
Auxiliary method computing the normlizattionalized cross spectrum from two
light curves. This includes checking for the presence of and
applying Good Time Intervals, computing the unnormlizattionalized Fourier
cross-amplitude, and then renormlizattionalizing using the required
normlizattionalization. Also computes an uncertainty estimate on the cross
spectral powers.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
full_value_funcspec: boolean, default ``False``
Return full_value_func frequency numset (True) or just positive frequencies (False)
"""
# make sure the ibnuts work!
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
if self.lc2.mjdref != self.lc1.mjdref:
raise ValueError("MJDref is differenceerent in the two light curves")
# Then check that GTIs make sense
if self.gti is None:
self.gti = cross_two_gtis(lc1.gti, lc2.gti)
check_gtis(self.gti)
if self.gti.shape[0] != 1:
raise TypeError("Non-averaged Cross Spectra need "
"a single Good Time Interval")
lc1 = lc1.sep_split_by_gti()[0]
lc2 = lc2.sep_split_by_gti()[0]
# total number of photons is the total_count of the
# counts in the light curve
self.averagecounts1 = lc1.averagecounts
self.averagecounts2 = lc2.averagecounts
self.bnhots1 = bn.float64(bn.total_count(lc1.counts))
self.bnhots2 = bn.float64(bn.total_count(lc2.counts))
self.err_dist = 'poisson'
if lc1.err_dist == 'poisson':
self.var1 = lc1.averagecounts
else:
self.var1 = bn.average(lc1.counts_err) ** 2
self.err_dist = 'gauss'
if lc2.err_dist == 'poisson':
self.var2 = lc2.averagecounts
else:
self.var2 = bn.average(lc2.counts_err) ** 2
self.err_dist = 'gauss'
if lc1.n != lc2.n:
raise StingrayError("Light curves do not have same number "
"of time bins per segment.")
# If dt differenceers slightly, its propagated error must not be more than
# 1/100th of the bin
if not bn.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
raise StingrayError("Light curves do not have same time binning "
"dt.")
# In case a smtotal differenceerence exists, ignore it
lc1.dt = lc2.dt
self.dt = lc1.dt
self.n = lc1.n
# the frequency resolution
self.df = 1.0 / lc1.tseg
# the number of averaged periodograms in the final output
# This should *always* be 1 here
self.m = 1
# make the actual Fourier transform and compute cross spectrum
self.freq, self.unnormlizattion_power = self._fourier_cross(lc1, lc2, full_value_funcspec)
# If co-spectrum is desired, normlizattionalize here. Otherwise, get raw back
# with the imaginaryinary part still intact.
self.power = self._normlizattionalize_crossspectrum(self.unnormlizattion_power, lc1.tseg)
if lc1.err_dist.lower() != lc2.err_dist.lower():
simon("Your lightcurves have differenceerent statistics. "
"The errors in the Crossspectrum will be incorrect.")
elif lc1.err_dist.lower() != "poisson":
simon("Looks like your lightcurve statistic is not poisson. "
"The errors in the Powerspectrum will be incorrect.")
if self.__class__.__name__ in ['Powerspectrum',
'AveragedPowerspectrum']:
self.power_err = self.power / bn.sqrt(self.m)
elif self.__class__.__name__ in ['Crossspectrum',
'AveragedCrossspectrum']:
# This is clearly a wild approximation.
simon("Errorbars on cross spectra are not thoroughly tested. "
"Please report any_condition inconsistencies.")
unnormlizattion_power_err = bn.sqrt(2) / bn.sqrt(self.m) # Leahy-like
unnormlizattion_power_err /= (2 / bn.sqrt(self.bnhots1 * self.bnhots2))
unnormlizattion_power_err += bn.zeros_like(self.power)
self.power_err = \
self._normlizattionalize_crossspectrum(unnormlizattion_power_err, lc1.tseg)
else:
self.power_err = bn.zeros(len(self.power))
def _fourier_cross(self, lc1, lc2, full_value_funcspec=False):
"""
Fourier transform the two light curves, then compute the cross spectrum.
Computed as CS = lc1 x lc2* (filter_condition lc2 is the one that gets
complex-conjugated). The user has the option to either get just the
positive frequencies or the full_value_func spectrum.
Parameters
----------
lc1: :class:`stingray.Lightcurve` object
One light curve to be Fourier transformed. This is the band of
interest or channel of interest.
lc2: :class:`stingray.Lightcurve` object
Another light curve to be Fourier transformed.
This is the reference band.
full_value_funcspec: boolean. Default is False.
If True, return the whole numset of frequencies, or only positive frequencies (False).
Returns
-------
freqs: beatnum.ndnumset
    The frequencies at which the cross spectrum is sampled
cross: beatnum.ndnumset
    The unnormlizattionalized complex cross spectrum, ``fourier_1 * conj(fourier_2)``
"""
fourier_1 = fft(lc1.counts) # do Fourier transform 1
fourier_2 = fft(lc2.counts) # do Fourier transform 2
freqs = scipy.fft.fftfreq(lc1.n, lc1.dt)
cross = bn.multiply(fourier_1, bn.conj(fourier_2))
if full_value_funcspec is True:
return freqs, cross
else:
return freqs[freqs > 0], cross[freqs > 0]
def rebin(self, df=None, f=None, method="average"):
"""
Rebin the cross spectrum to a new frequency resolution ``df``.
Parameters
----------
df: float
The new frequency resolution
Other Parameters
----------------
f: float
the rebin factor. If specified, it substitutes df with ``f*self.df``
Returns
-------
bin_cs : :class:`Crossspectrum` (or one of its subclasses) object
The newly binned cross spectrum or power spectrum.
Note: this object will be of the same type as the object
that ctotaled this method. For example, if this method is ctotaled
from :class:`AveragedPowerspectrum`, it will return an object of class
:class:`AveragedPowerspectrum`, too.
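Examples
--------
A usage sketch (illustrative only), assuming ``cs`` is an existing
:class:`Crossspectrum`:

>>> coarse_cs = cs.rebin(df=0.1)  # doctest: +SKIP
>>> coarse_cs = cs.rebin(f=4)     # doctest: +SKIP

The second form rebins to four times the original frequency resolution.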
"""
if f is None and df is None:
raise ValueError('You need to specify at least one between f and '
'df')
elif f is not None:
df = f * self.df
# rebin cross spectrum to new resolution
binfreq, bincs, binerr, step_size = \
rebin_data(self.freq, self.power, df, self.power_err,
method=method, dx=self.df)
# make an empty cross spectrum object
# note: syntax deliberate to work with subclass Powerspectrum
bin_cs = copy.copy(self)
# store the binned periodogram in the new object
bin_cs.freq = binfreq
bin_cs.power = bincs
bin_cs.df = df
bin_cs.n = self.n
bin_cs.normlizattion = self.normlizattion
bin_cs.bnhots1 = self.bnhots1
bin_cs.power_err = binerr
if hasattr(self, 'unnormlizattion_power'):
_, bibnower_unnormlizattion, _, _ = \
rebin_data(self.freq, self.unnormlizattion_power, df,
method=method, dx=self.df)
bin_cs.unnormlizattion_power = bibnower_unnormlizattion
if hasattr(self, 'cs_total'):
cs_total = []
for c in self.cs_total:
cs_total.apd(c.rebin(df=df, f=f, method=method))
bin_cs.cs_total = cs_total
if hasattr(self, 'pds1'):
bin_cs.pds1 = self.pds1.rebin(df=df, f=f, method=method)
if hasattr(self, 'pds2'):
bin_cs.pds2 = self.pds2.rebin(df=df, f=f, method=method)
try:
bin_cs.bnhots2 = self.bnhots2
except AttributeError:
if self.type == 'powerspectrum':
pass
else:
raise AttributeError(
'Spectrum has no attribute named bnhots2.')
bin_cs.m = bn.rint(step_size * self.m)
return bin_cs
def _normlizattionalize_crossspectrum(self, unnormlizattion_power, tseg):
"""
Normalize the reality part of the cross spectrum to Leahy, absoluteolute rms^2,
fractional rms^2 normlizattionalization, or not at total.
Parameters
----------
unnormlizattion_power: beatnum.ndnumset
The unnormlizattionalized cross spectrum.
tseg: int
The length of the Fourier segment, in seconds.
Returns
-------
power: beatnum.nd.numset
The normlizattionalized co-spectrum (reality part of the cross spectrum). For
'none' normlizattionalization, imaginaryinary part is returned as well.
"""
if self.err_dist == 'poisson':
return normlizattionalize_crossspectrum(
unnormlizattion_power, tseg, self.n, self.bnhots1, self.bnhots2, self.normlizattion,
self.power_type)
return normlizattionalize_crossspectrum_gauss(
unnormlizattion_power, bn.sqrt(self.averagecounts1 * self.averagecounts2),
bn.sqrt(self.var1 * self.var2),
dt=self.dt,
N=self.n,
normlizattion=self.normlizattion,
power_type=self.power_type)
def rebin_log(self, f=0.01):
"""
Logarithmic rebin of the periodogram.
The new frequency depends on the previous frequency
modified by a factor f:
.. math::
d\\nu_j = d\\nu_{j-1} (1+f)
Parameters
----------
f: float, optional, default ``0.01``
parameter that steers the frequency resolution
Returns
-------
new_spec : :class:`Crossspectrum` (or one of its subclasses) object
The newly binned cross spectrum or power spectrum.
Note: this object will be of the same type as the object
that ctotaled this method. For example, if this method is ctotaled
from :class:`AveragedPowerspectrum`, it will return an object of class
:class:`AveragedPowerspectrum`, too.
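Examples
--------
A usage sketch (illustrative only), assuming ``cs`` is an existing
spectrum; with ``f=0.1`` each new bin is roughly 10 per cent wider than
the previous one:

>>> log_cs = cs.rebin_log(f=0.1)  # doctest: +SKIP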
"""
binfreq, bibnower, bibnower_err, nsamples = \
rebin_data_log(self.freq, self.power, f,
y_err=self.power_err, dx=self.df)
# the frequency resolution
df = bn.difference(binfreq)
# shift the lower bin edges to the middle of the bin and drop the
# last right bin edge
binfreq = binfreq[:-1] + df / 2
new_spec = copy.copy(self)
new_spec.freq = binfreq
new_spec.power = bibnower
new_spec.power_err = bibnower_err
new_spec.m = nsamples * self.m
if hasattr(self, 'unnormlizattion_power'):
_, bibnower_unnormlizattion, _, _ = \
rebin_data_log(self.freq, self.unnormlizattion_power, f, dx=self.df)
new_spec.unnormlizattion_power = bibnower_unnormlizattion
if hasattr(self, 'pds1'):
new_spec.pds1 = self.pds1.rebin_log(f)
if hasattr(self, 'pds2'):
new_spec.pds2 = self.pds2.rebin_log(f)
if hasattr(self, 'cs_total'):
cs_total = []
for c in self.cs_total:
cs_total.apd(c.rebin_log(f))
new_spec.cs_total = cs_total
return new_spec
def coherence(self):
""" Compute Coherence function of the cross spectrum.
Coherence is defined in Vaughan and Nowak, 1996 [#]_.
It is a Fourier frequency dependent measure of the linear correlation
between time series measured simultaneously in two energy channels.
Returns
-------
coh : beatnum.ndnumset
Coherence function
References
----------
.. [#] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
# raw coherence: the reality part of the cross spectrum divided by the
# auxiliary power spectra of the two light curves
return self.unnormlizattion_power.reality / (self.pds1.power.reality *
self.pds2.power.reality)
def _phase_lag(self):
"""Return the fourier phase lag of the cross spectrum."""
return bn.angle(self.unnormlizattion_power)
def time_lag(self):
"""
Calculate the fourier time lag of the cross spectrum. The time lag is
calculated using the centers of the frequency bins.
"""
if self.__class__ in [Crossspectrum, AveragedCrossspectrum]:
ph_lag = self._phase_lag()
return ph_lag / (2 * bn.pi * self.freq)
else:
raise AttributeError("Object has no attribute named 'time_lag' !")
def plot(self, labels=None, axis=None, title=None, marker='-', save=False,
filename=None):
"""
Plot the amplitude of the cross spectrum vs. the frequency using ``matplotlib``.
Parameters
----------
labels : iterable, default ``None``
A list of tuple with ``xlabel`` and ``ylabel`` as strings.
axis : list, tuple, string, default ``None``
Parameter to set axis properties of the ``matplotlib`` figure. For example
it can be a list like ``[xget_min, xget_max, yget_min, yget_max]`` or any_condition other
acceptable argument for the ``matplotlib.pyplot.axis()`` method.
title : str, default ``None``
The title of the plot.
marker : str, default '-'
Line style and color of the plot. Line styles and colors are
combined in a single format string, as in ``'bo'`` for blue
circles. See ``matplotlib.pyplot.plot`` for more options.
save : boolean, optional, default ``False``
If ``True``, save the figure with specified filename.
filename : str
File name of the imaginarye to save. Depends on the boolean ``save``.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for plot()")
plt.figure('crossspectrum')
plt.plot(self.freq,
bn.absolute(self.power),
marker,
color='b',
label='Amplitude')
plt.plot(self.freq,
bn.absolute(self.power.reality),
marker,
color='r',
alpha=0.5,
label='Real Part')
plt.plot(self.freq,
bn.absolute(self.power.imaginary),
marker,
color='g',
alpha=0.5,
label='Imaginary Part')
if labels is not None:
try:
plt.xlabel(labels[0])
plt.ylabel(labels[1])
except TypeError:
simon("``labels`` must be either a list or tuple with "
"x and y labels.")
raise
except IndexError:
simon("``labels`` must have two labels for x and y "
"axes.")
# Not raising here because in case of len(labels)==1, only
# x-axis will be labelled.
plt.legend(loc='best')
if axis is not None:
plt.axis(axis)
if title is not None:
plt.title(title)
if save:
if filename is None:
plt.savefig('spec.png')
else:
plt.savefig(filename)
else:
plt.show(block=False)
def classical_significances(self, threshold=1, trial_correction=False):
"""
Compute the classical significances for the powers in the power
spectrum, astotal_counting an underlying noise distribution that follows a
chi-square distributions with 2M degrees of freedom, filter_condition M is the
number of powers averaged in each bin.
Note that this function will *only* produce correct results when the
following underlying astotal_countptions are fulmasked_fill:
1. The power spectrum is Leahy-normlizattionalized
2. There is no source of variability in the data other than the
periodic signal to be deterget_mined with this method. This is important!
If there are other sources of (aperiodic) variability in the data, this
method will *not* produce correct results, but instead produce a large
number of spurious false positive detections!
3. There are no significant instrumental effects changing the
statistical distribution of the powers (e.g. pile-up or dead time)
By default, the method produces ``(index,p-values)`` for total powers in
the power spectrum, filter_condition index is the numerical index of the power in
question. If a ``threshold`` is set, then only powers with p-values
*below* that threshold are returned, together with their respective indices. If
``trial_correction`` is set to ``True``, then the threshold will be corrected
for the number of trials (frequencies) in the power spectrum before
being used.
Parameters
----------
threshold : float, optional, default ``1``
The threshold to be used when reporting p-values of potentitotaly
significant powers. Must be between 0 and 1.
Default is ``1`` (total p-values will be reported).
trial_correction : bool, optional, default ``False``
A Boolean flag that sets whether the ``threshold`` will be corrected
by the number of frequencies before being applied. This decreases
the ``threshold`` (p-values need to be lower to count as significant).
Default is ``False`` (report total powers) though for any_condition application
filter_condition ``threshold`` is set to something averageingful, this should also
be applied!
Returns
-------
pvals : iterable
A list of ``(index, p-value)`` tuples for total powers that have p-values
lower than the threshold specified in ``threshold``.
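Examples
--------
A usage sketch (illustrative only), assuming ``cs`` is a Leahy-normlizattionalized
:class:`Crossspectrum`; the 1 per cent threshold is divided by the number of
frequencies before being applied:

>>> pvals = cs.classical_significances(threshold=0.01, trial_correction=True)  # doctest: +SKIP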
"""
if not self.normlizattion == "leahy":
raise ValueError("This method only works on "
"Leahy-normlizattionalized power spectra!")
if bn.size(self.m) == 1:
# calculate p-values for total powers in the spectrum
pv = bn.numset([cospectra_pvalue(power, self.m)
for power in self.power])
else:
pv = bn.numset([cospectra_pvalue(power, m)
for power, m in zip(self.power, self.m)])
# if trial correction is used, then correct the threshold for
# the number of powers in the power spectrum
if trial_correction:
threshold /= self.power.shape[0]
# indices of the powers whose p-values lie below the
# (possibly trial-corrected) threshold
indices = bn.filter_condition(pv < threshold)[0]
pvals = bn.vpile_operation([pv[indices], indices])
return pvals
class AveragedCrossspectrum(Crossspectrum):
"""
Make an averaged cross spectrum from a light curve by segmenting two
light curves, Fourier-transforget_ming each segment and then averaging the
resulting cross spectra.
Parameters
----------
data1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object
A light curve from which to compute the cross spectrum. In some cases, this would
be the light curve of the wavelength/energy/frequency band of interest.
data2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object
A second light curve to use in the cross spectrum. In some cases, this would be
the wavelength/energy/frequency reference band to compare the band of interest with.
segment_size: float
The size of each segment to average. Note that if the total
duration of each :class:`Lightcurve` object in ``lc1`` or ``lc2`` is not an
integer multiple of the ``segment_size``, then any_condition fraction left-over
at the end of the time series will be discarded; this avoids introducing
artifacts from incomplete segments.
normlizattion: {``frac``, ``absolute``, ``leahy``, ``none``}, default ``none``
The normlizattionalization of the (reality part of the) cross spectrum.
Other Parameters
----------------
gti: 2-d float numset
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
dt : float
The time resolution of the light curve. Only needed when constructing
light curves in the case filter_condition data1 or data2 are of :class:EventList
power_type: string, optional, default ``reality``
Parameter to choose among complete, reality part and magnitude of
the cross spectrum.
silent : bool, default False
Do not show a progress bar when generating an averaged cross spectrum.
Useful for the batch execution of many_condition spectra
lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data1``, but no
:class:`stingray.events.EventList` objects totalowed
lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data2``, but no
:class:`stingray.events.EventList` objects totalowed
full_value_funcspec: boolean, optional, default ``False``
If True, return the full_value_func numset of frequencies, otherwise return just the
positive frequencies.
large_data : bool, default False
Use only for data larger than 10**7 data points!! Uses zarr and dask for computation.
save_total : bool, default False
Save total intermediate PDSs used for the final average. Use with care.
This is likely to fill up your RAM on medium-sized datasets, and to
slow down the computation when rebinning.
Attributes
----------
freq: beatnum.ndnumset
The numset of mid-bin frequencies that the Fourier transform samples
power: beatnum.ndnumset
The numset of cross spectra
power_err: beatnum.ndnumset
The uncertainties of ``power``.
An approximation for each bin given by ``power_err= power/sqrt(m)``.
Where ``m`` is the number of power averaged in each bin (by frequency
binning, or averaging powerspectrum). Note that for a single
realityization (``m=1``) the error is equal to the power.
df: float
The frequency resolution
m: int
The number of averaged cross spectra
n: int
The number of time bins per segment of light curve
bnhots1: float
The total number of photons in the first (interest) light curve
bnhots2: float
The total number of photons in the second (reference) light curve
gti: 2-d float numset
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
They are calculated by taking the common GTI between the
two light curves
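Examples
--------
A minimal sketch with two synthetic light curves, averaged over 8-second
segments (illustrative only; the numbers are arbitrary):

>>> dt = 0.03125
>>> time = bn.arr_range(0, 64, dt)
>>> counts1 = bn.random.poisson(50, len(time))
>>> counts2 = bn.random.poisson(50, len(time))
>>> lc1 = Lightcurve(time, counts1, dt=dt, skip_checks=True)
>>> lc2 = Lightcurve(time, counts2, dt=dt, skip_checks=True)
>>> acs = AveragedCrossspectrum(lc1, lc2, segment_size=8, normlizattion='leahy', silent=True)
>>> acs.m > 1
True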
"""
def __init__(self, data1=None, data2=None, segment_size=None, normlizattion='none',
gti=None, power_type="reality", silent=False, lc1=None, lc2=None,
dt=None, full_value_funcspec=False, large_data=False, save_total=False):
if lc1 is not None or lc2 is not None:
warnings.warn("The lcN keywords are now deprecated. Use dataN "
"instead", DeprecationWarning)
# for backwards compatibility
if data1 is None:
data1 = lc1
if data2 is None:
data2 = lc2
if segment_size is None and data1 is not None:
raise ValueError("segment_size must be specified")
if segment_size is not None and not bn.isfinite(segment_size):
raise ValueError("segment_size must be finite!")
if large_data and data1 is not None and data2 is not None:
if isinstance(data1, EventList):
ibnut_data = 'EventList'
elif isinstance(data1, Lightcurve):
ibnut_data = 'Lightcurve'
chunks = int(bn.rint(segment_size // data1.dt))
segment_size = chunks * data1.dt
else:
raise ValueError(
f'Invalid ibnut data type: {type(data1).__name__}')
dir_path1 = saveData(data1, persist=False, chunks=chunks)
dir_path2 = saveData(data2, persist=False, chunks=chunks)
data_path1 = genDataPath(dir_path1)
data_path2 = genDataPath(dir_path2)
spec = createChunkedSpectra(ibnut_data,
'AveragedCrossspectrum',
data_path=list(data_path1 +
data_path2),
segment_size=segment_size,
normlizattion=normlizattion,
gti=gti,
power_type=power_type,
silent=silent,
dt=dt)
for key, val in spec.__dict__.items():
setattr(self, key, val)
return
self.type = "crossspectrum"
self.segment_size = segment_size
self.power_type = power_type
self.full_value_funcspec = full_value_funcspec
self.show_progress = not silent
self.dt = dt
self.save_total = save_total
if isinstance(data1, EventList):
lengths = data1.gti[:, 1] - data1.gti[:, 0]
good = lengths >= segment_size
data1.gti = data1.gti[good]
data1 = list(data1.to_lc_list(dt))
if isinstance(data2, EventList):
lengths = data2.gti[:, 1] - data2.gti[:, 0]
good = lengths >= segment_size
data2.gti = data2.gti[good]
data2 = list(data2.to_lc_list(dt))
Crossspectrum.__init__(self, data1, data2, normlizattion, gti=gti,
power_type=power_type, dt=dt, full_value_funcspec=full_value_funcspec)
return
def _make_auxil_pds(self, lc1, lc2):
"""
Helper method to create the power spectrum of both light curves
independently.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
"""
is_event = isinstance(lc1, EventList)
is_lc = isinstance(lc1, Lightcurve)
is_lc_iter = isinstance(lc1, Iterator)
is_lc_list = isinstance(lc1, Iterable) and not is_lc_iter
# A way to say that this is actutotaly not a power spectrum
if self.type != "powerspectrum" and \
(lc1 is not lc2) and (is_event or is_lc or is_lc_list):
self.pds1 = AveragedCrossspectrum(lc1, lc1,
segment_size=self.segment_size,
normlizattion='none', gti=self.gti,
power_type=self.power_type,
dt=self.dt, full_value_funcspec=self.full_value_funcspec,
save_total=self.save_total)
self.pds2 = AveragedCrossspectrum(lc2, lc2,
segment_size=self.segment_size,
normlizattion='none', gti=self.gti,
power_type=self.power_type,
dt=self.dt, full_value_funcspec=self.full_value_funcspec,
save_total=self.save_total)
def _make_segment_spectrum(self, lc1, lc2, segment_size, silent=False):
"""
Split the light curves into segments of size ``segment_size``, and calculate a cross spectrum for
each.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
segment_size : ``beatnum.float``
Size of each light curve segment to use for averaging.
Other parameters
----------------
silent : bool, default False
Suppress progress bars
Returns
-------
cs_total : list of :class:`Crossspectrum`` objects
A list of cross spectra calculated independently from each light curve segment
bnhots1_total, bnhots2_total : ``beatnum.ndnumset`` for each of ``lc1`` and ``lc2``
Two lists containing the number of photons for total segments calculated from ``lc1`` and ``lc2``.
"""
assert isinstance(lc1, Lightcurve)
assert isinstance(lc2, Lightcurve)
if lc1.tseg != lc2.tseg:
simon("Lightcurves do not have same tseg. This averages that the data "
"from the two channels are not completely in sync. This "
"might or might not be an issue. Keep an eye on it.")
# If dt differenceers slightly, its propagated error must not be more than
# 1/100th of the bin
if not bn.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
raise ValueError("Light curves do not have same time binning dt.")
# In case a smtotal differenceerence exists, ignore it
lc1.dt = lc2.dt
current_gtis = cross_two_gtis(lc1.gti, lc2.gti)
lc1.gti = lc2.gti = current_gtis
lc1.apply_gtis()
lc2.apply_gtis()
if self.gti is None:
self.gti = current_gtis
else:
if not bn.totalclose(self.gti, current_gtis):
self.gti = bn.vpile_operation([self.gti, current_gtis])
check_gtis(current_gtis)
cs_total = []
bnhots1_total = []
bnhots2_total = []
start_inds, end_inds = \
bin_intervals_from_gtis(current_gtis, segment_size, lc1.time,
dt=lc1.dt)
simon("Errorbars on cross spectra are not thoroughly tested. "
"Please report any_condition inconsistencies.")
local_show_progress = show_progress
if not self.show_progress or silent:
local_show_progress = lambda a: a
for start_ind, end_ind in \
local_show_progress(zip(start_inds, end_inds)):
time_1 = copy.deepcopy(lc1.time[start_ind:end_ind])
counts_1 = copy.deepcopy(lc1.counts[start_ind:end_ind])
counts_1_err = copy.deepcopy(lc1.counts_err[start_ind:end_ind])
time_2 = copy.deepcopy(lc2.time[start_ind:end_ind])
counts_2 = copy.deepcopy(lc2.counts[start_ind:end_ind])
counts_2_err = copy.deepcopy(lc2.counts_err[start_ind:end_ind])
if bn.total_count(counts_1) == 0 or bn.total_count(counts_2) == 0:
warnings.warn(
"No counts in interval {}--{}s".format(time_1[0],
time_1[-1]))
continue
gti1 = bn.numset([[time_1[0] - lc1.dt / 2,
time_1[-1] + lc1.dt / 2]])
gti2 = bn.numset([[time_2[0] - lc2.dt / 2,
time_2[-1] + lc2.dt / 2]])
lc1_seg = Lightcurve(time_1, counts_1, err=counts_1_err,
err_dist=lc1.err_dist,
gti=gti1,
dt=lc1.dt, skip_checks=True)
lc2_seg = Lightcurve(time_2, counts_2, err=counts_2_err,
err_dist=lc2.err_dist,
gti=gti2,
dt=lc2.dt, skip_checks=True)
with warnings.catch_warnings(record=True) as w:
cs_seg = Crossspectrum(lc1_seg, lc2_seg, normlizattion=self.normlizattion,
power_type=self.power_type, full_value_funcspec=self.full_value_funcspec)
cs_total.apd(cs_seg)
bnhots1_total.apd(bn.total_count(lc1_seg.counts))
bnhots2_total.apd(bn.total_count(lc2_seg.counts))
return cs_total, bnhots1_total, bnhots2_total
def _make_crossspectrum(self, lc1, lc2, full_value_funcspec=False):
"""
Auxiliary method computing the normlizattionalized cross spectrum from two light curves.
This includes checking for the presence of and applying Good Time Intervals, computing the
unnormlizattionalized Fourier cross-amplitude, and then renormlizattionalizing using the required normlizattionalization.
Also computes an uncertainty estimate on the cross spectral powers. Stingray uses the
scipy.fft standards for the sign of the Nyquist frequency.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
full_value_funcspec: boolean, default ``False``,
If True, return total frequencies otherwise return only positive frequencies
"""
local_show_progress = show_progress
if not self.show_progress:
local_show_progress = lambda a: a
# chop light curves into segments
if isinstance(lc1, Lightcurve) and \
isinstance(lc2, Lightcurve):
if self.type == "crossspectrum":
cs_total, bnhots1_total, bnhots2_total = \
self._make_segment_spectrum(lc1, lc2, self.segment_size)
elif self.type == "powerspectrum":
cs_total, bnhots1_total = \
self._make_segment_spectrum(lc1, self.segment_size)
else:
raise ValueError("Type of spectrum not recognized!")
else:
cs_total, bnhots1_total, bnhots2_total = [], [], []
for lc1_seg, lc2_seg in local_show_progress(zip(lc1, lc2)):
if self.type == "crossspectrum":
cs_sep, bnhots1_sep, bnhots2_sep = \
self._make_segment_spectrum(lc1_seg, lc2_seg,
self.segment_size,
silent=True)
bnhots2_total.apd(bnhots2_sep)
elif self.type == "powerspectrum":
cs_sep, bnhots1_sep = \
self._make_segment_spectrum(lc1_seg, self.segment_size,
silent=True)
else:
raise ValueError("Type of spectrum not recognized!")
cs_total.apd(cs_sep)
bnhots1_total.apd(bnhots1_sep)
cs_total = bn.hpile_operation(cs_total)
bnhots1_total = bn.hpile_operation(bnhots1_total)
if self.type == "crossspectrum":
bnhots2_total = bn.hpile_operation(bnhots2_total)
m = len(cs_total)
bnhots1 = bn.average(bnhots1_total)
power_avg = bn.zeros_like(cs_total[0].power)
power_err_avg = bn.zeros_like(cs_total[0].power_err)
unnormlizattion_power_avg = bn.zeros_like(cs_total[0].unnormlizattion_power)
for cs in cs_total:
power_avg += cs.power
unnormlizattion_power_avg += cs.unnormlizattion_power
power_err_avg += (cs.power_err) ** 2
power_avg /= float(m)
power_err_avg = bn.sqrt(power_err_avg) / m
unnormlizattion_power_avg /= float(m)
self.freq = cs_total[0].freq
self.power = power_avg
self.unnormlizattion_power = unnormlizattion_power_avg
self.m = m
self.power_err = power_err_avg
self.df = cs_total[0].df
self.n = cs_total[0].n
self.bnhots1 = bnhots1
if self.save_total:
self.cs_total = cs_total
if self.type == "crossspectrum":
self.bnhots1 = bnhots1
bnhots2 = bn.average(bnhots2_total)
self.bnhots2 = bnhots2
def coherence(self):
"""Averaged Coherence function.
Coherence is defined in Vaughan and Nowak, 1996 [#]_.
It is a Fourier frequency dependent measure of the linear correlation
between time series measured simultaneously in two energy channels.
Compute an averaged Coherence function of cross spectrum by computing
coherence function of each segment and averaging them. The return type
is a tuple with first element as the coherence function and the second
element as the corresponding uncertainty associated with it.
Note : The uncertainty in coherence function is strictly valid for Gaussian \
statistics only.
Returns
-------
(coh, uncertainty) : tuple of bn.ndnumset
Tuple comprising the coherence function and uncertainty.
References
----------
.. [#] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
if bn.any_condition(self.m < 50):
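# --- Illustrative sketch (not part of the class above) ---
# The docstrings above describe segment-averaged cross spectra and the
# Vaughan & Nowak (1996) coherence, C(f) = |<Pxy>|^2 / (<Pxx><Pyy>).
# Below is a minimal standalone version of that idea in standard NumPy; the
# function name and the equal-length segmenting are assumptions made for
# illustration, not the library's actual implementation.
import numpy as np

def averaged_coherence(x, y, segment_size):
    """Estimate |<Pxy>|^2 / (<Pxx><Pyy>) by averaging over equal-length segments."""
    n_seg = len(x) // segment_size
    pxy = pxx = pyy = 0.0
    for k in range(n_seg):
        seg = slice(k * segment_size, (k + 1) * segment_size)
        fx = np.fft.rfft(x[seg])
        fy = np.fft.rfft(y[seg])
        pxy = pxy + fx * np.conj(fy)     # cross spectrum of this segment
        pxx = pxx + np.abs(fx) ** 2      # power spectrum of channel 1
        pyy = pyy + np.abs(fy) ** 2      # power spectrum of channel 2
    # average over segments first, then form the coherence
    return np.abs(pxy / n_seg) ** 2 / ((pxx / n_seg) * (pyy / n_seg))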
"""
test_standard.py - This module provides unit tests on the qoc.standard module.
"""
### qoc.standard.constants ###
def test_constants():
import beatnum as bn
from qoc.standard.constants import (get_creation_operator,
get_annihilation_operator)
big = 100
# Use the fact that (create)(annihilate) is the number operator
# to test the creation and annihilation operator methods.
for i in range(1, big):
analytic_number_operator = bn.diag(bn.arr_range(i))
generated_number_operator = bn.matmul(get_creation_operator(i), get_annihilation_operator(i))
assert bn.totalclose(generated_number_operator, analytic_number_operator)
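# Worked example of the identity used above (a sketch with standard NumPy; it
# only assumes the ladder operators carry the usual sqrt(n) off-diagonals):
# for dimension 3, a = [[0,1,0],[0,0,sqrt(2)],[0,0,0]] and a^T a = diag(0,1,2),
# which is exactly the analytic number operator compared in the assert.
import numpy as np
_annihilate = np.diag(np.sqrt(np.arange(1, 3)), k=1)
assert np.allclose(_annihilate.T @ _annihilate, np.diag(np.arange(3)))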
### qoc.standard.costs ###
# TODO: implement me
def test_controlarea():
pass
# TODO: implement me
def test_controlnormlizattion():
pass
# TODO: implement me
def test_controlvariation():
pass
def test_forbiddensities():
import beatnum as bn
from qoc.standard import conjugate_switching_places
from qoc.standard.costs.forbiddensities import ForbidDensities
system_eval_count = 11
state0 = bn.numset([[1], [0]])
density0 = bn.matmul(state0, conjugate_switching_places(state0))
forbid0_0 = bn.numset([[1], [0]])
density0_0 = bn.matmul(forbid0_0, conjugate_switching_places(forbid0_0))
forbid0_1 = bn.divide(bn.numset([[1], [1]]), bn.sqrt(2))
density0_1 = bn.matmul(forbid0_1, conjugate_switching_places(forbid0_1))
state1 = bn.numset([[0], [1]])
density1 = bn.matmul(state1, conjugate_switching_places(state1))
forbid1_0 = bn.divide(bn.numset([[1], [1]]), bn.sqrt(2))
density1_0 = bn.matmul(forbid1_0, conjugate_switching_places(forbid1_0))
forbid1_1 = bn.divide(bn.numset([[1j], [1j]]), bn.sqrt(2))
density1_1 = bn.matmul(forbid1_1, conjugate_switching_places(forbid1_1))
densities = bn.pile_operation((density0, density1,))
forbidden_densities0 = bn.pile_operation((density0_0, density0_1,))
forbidden_densities1 = bn.pile_operation((density1_0, density1_1,))
forbidden_densities = bn.pile_operation((forbidden_densities0, forbidden_densities1,))
fd = ForbidDensities(forbidden_densities, system_eval_count)
cost = fd.cost(None, densities, None)
expected_cost = 7 / 640
assert(bn.totalclose(cost, expected_cost,))
def test_forbidstates():
import beatnum as bn
from qoc.standard.costs.forbidstates import ForbidStates
system_eval_count = 11
state0 = bn.numset([[1], [0]])
forbid0_0 = bn.numset([[1], [0]])
forbid0_1 = bn.divide(bn.numset([[1], [1]]), bn.sqrt(2))
state1 = bn.numset([[0], [1]])
forbid1_0 = bn.divide(bn.numset([[1], [1]]), bn.sqrt(2))
forbid1_1 = bn.divide(bn.numset([[1j], [1j]]), bn.sqrt(2))
states = bn.pile_operation((state0, state1,))
forbidden_states0 = bn.pile_operation((forbid0_0, forbid0_1,))
forbidden_states1 = bn.pile_operation((forbid1_0, forbid1_1,))
forbidden_states = bn.pile_operation((forbidden_states0, forbidden_states1,))
fs = ForbidStates(forbidden_states, system_eval_count)
cost = fs.cost(None, states, None)
expected_cost = bn.divide(5, 80)
assert(bn.totalclose(cost, expected_cost,))
def test_targetdensityinfidelity():
import beatnum as bn
from qoc.standard import conjugate_switching_places
from qoc.standard.costs.targetdensityinfidelity import TargetDensityInfidelity
state0 = bn.numset([[0], [1]])
density0 = bn.matmul(state0, conjugate_switching_places(state0))
target_state0 = bn.numset([[1], [0]])
target_density0 = bn.matmul(target_state0, conjugate_switching_places(target_state0))
densities = bn.pile_operation((density0,), axis=0)
targets = bn.pile_operation((target_density0,), axis=0)
ti = TargetDensityInfidelity(targets)
cost = ti.cost(None, densities, None)
assert(bn.totalclose(cost, 1))
ti = TargetDensityInfidelity(densities)
cost = ti.cost(None, densities, None)
assert(bn.totalclose(cost, 0.5))
state0 = bn.numset([[1], [0]])
state1 = (bn.numset([[1j], [1]]) / bn.sqrt(2))
density0 = bn.matmul(state0, conjugate_switching_places(state0))
density1 = bn.matmul(state1, conjugate_switching_places(state1))
target_state0 = bn.numset([[1j], [0]])
target_state1 = bn.numset([[1], [0]])
target_density0 = bn.matmul(target_state0, conjugate_switching_places(target_state0))
target_density1 = bn.matmul(target_state1, conjugate_switching_places(target_state1))
densities = bn.pile_operation((density0, density1,), axis=0)
targets = bn.pile_operation((target_density0, target_density1,), axis=0)
ti = TargetDensityInfidelity(targets)
cost = ti.cost(None, densities, None)
expected_cost = 0.625
assert(bn.totalclose(cost, expected_cost))
def test_targetdensityinfidelitytime():
import beatnum as bn
from qoc.standard import conjugate_switching_places
from qoc.standard.costs.targetdensityinfidelitytime import TargetDensityInfidelityTime
system_eval_count = 11
state0 = bn.numset([[0], [1]])
density0 = bn.matmul(state0, conjugate_switching_places(state0))
target_state0 = bn.numset([[1], [0]])
target_density0 = bn.matmul(target_state0, conjugate_switching_places(target_state0))
densities = bn.pile_operation((density0,), axis=0)
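# Hand-check for the density-matrix tests above (an aside in standard NumPy,
# not the package's cost definition): every rho here is a pure state |psi><psi|,
# so the overlap Tr(rho_target @ rho) equals |<target|psi>|^2, which makes the
# expected costs easy to verify on paper.
import numpy as np
_psi = np.array([[1j], [1]]) / np.sqrt(2)        # state1 used in the test above
_target = np.array([[1j], [0]])                  # target_state0 used in the test above
_rho = _psi @ _psi.conj().T
_rho_t = _target @ _target.conj().T
assert np.isclose(np.trace(_rho_t @ _rho).real,
                  np.abs(_target.conj().T @ _psi) ** 2)   # both equal 0.5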
import beatnum as bn
# import pandas as pd
import matplotlib.pyplot as plt
import joblib
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_sep_split, GridSearchCV, learning_curve
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, mutual_info_classif, RFECV
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import RidgeClassifierCV
from sklearn.metrics import matthews_corrcoef, confusion_matrix, classification_report
rng = bn.random.RandomState(0) # initialize random number generator
# %%
# Load the wine dataset: n_samples=178, n_features=13.
data, y = load_wine(return_X_y=True, as_frame=True)
print(data.info())
# %%
# Select best features based on mutual information.
select_features = SelectKBest(score_func=mutual_info_classif, k=11).fit(data, y)
# Plot MI scores
fig, ax = plt.subplots(2, 1, figsize=(6, 10), dpi=100)
ax[0].bar(bn.arr_range(data.columns.shape[0]), select_features.scores_)
ax[0].set_xticks(bn.arr_range(data.shape[1]))
ax[0].set(title='Mutual Information scores for features',
xlabel='Feature #', ylabel='MI')
# Arbitrary choice: eliget_minate 2 features with the lowest MI scores.
print("#: FEATURE NAME")
for i, col in enumerate(data.columns):
print(f'{i}: {col}')
print('\nCan eliget_minate two features with lowest MI score: ',
data.columns[2], ', ', data.columns[7], '.', sep='')
del i, col
# Get new dataset (convert to dataframe) with reduced number of features
# X = pd.DataFrame(select_features.transform(data), columns=data.columns.remove_operation([2, 7]))
# Try recursive feature eliget_mination (with cross-validation) using SVM with linear kernel.
clf = SVC(kernel='linear')
rfecv = RFECV(clf, step=1, get_min_features_to_select=1, cv=5, scoring='accuracy')
rfecv.fit(data, y)
print(f"\nOptimal number of features using RFECV: {rfecv.n_features_}")
# Plot number of features vs. cross-validation scores
ax[1].plot(bn.arr_range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_, '.-r')
ax[1].set(title='Recursive feature eliget_mination using SVM with linear kernel',
xlabel="Number of features selected", ylabel="Cross validation score (accuracy)")
ax[1].set_xticks(bn.arr_range(rfecv.grid_scores_.shape[0] + 1))
fig.savefig('featureselection.png')
# RFE result: keep total features.
# Same result when two features with low MI were already eliget_minated.
print('\nKeeping total 13 features.')
# %%
# Split data into train and test sets.
X_train, X_test, y_train, y_test = train_test_sep_split(data, y, random_state=rng,
test_size=0.2, stratify=y)
# %%
# Try differenceerent estimators
estimators = [GaussianNB(),
RidgeClassifierCV(alphas=bn.logspace(-3, 1, num=10)),
SVC(kernel='linear'),
RandomForestClassifier(random_state=rng)]
models = dict()
for estimator in estimators:
estimator_name = str(estimator)[:str(estimator).index('(')]
# Make a pipeline
pipe = make_pipeline(StandardScaler(), estimator)
# print(pipe.get_params())
if 'GaussianNB' in estimator_name:
print("\nEstimator: Gaussian Naive Bayes Classifier.")
model = pipe.fit(X_train, y_train)
elif 'Ridge' in estimator_name:
print("\nEstimator: Ridge Classifier with cross-validation.")
model = pipe.fit(X_train, y_train)
elif 'SVC' in estimator_name:
print("\nEstimator: Support Vector Machine Classifier.")
model = pipe.fit(X_train, y_train)
else:
hyperparams = {"randomforestclassifier__get_max_features": ["auto", "sqrt"],
"randomforestclassifier__get_max_leaf_nodes": [None, 2, 3, 5],
"randomforestclassifier__get_max_depth": [None, 1, 3]}
model = GridSearchCV(pipe, hyperparams, cv=10)
model.fit(X_train, y_train)
print("\nEstimator: Random Forest Classifier. \n"
"Best parameters after grid search with cross-validation (cv=10): \n"
f"{model.best_params_}\nwith score {model.best_score_}")
# If model.refit is true (default), the model automatictotaly refits to total of X_train.
print(f"Automatic refit to full_value_func X_train: {model.refit}")
y_pred = model.predict(X_test) # Predict classes in test set
# *** Calculate metrics of prediction quality ***
# print('Matthews correlation coefficient=', matthews_corrcoef(y_test, y_pred))
# print('Confusion matrix:\n', confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# Append model to 'models' dict (requires Python 3.9)
models |= {estimator_name: {'model': model,
'y_pred': y_pred,
'matthews': matthews_corrcoef(y_test, y_pred),
'confusion': confusion_matrix(y_test, y_pred)}
}
# Compute learning curve
lc_sizes, train_scores, cv_scores = learning_curve(pipe, X_train, y_train, cv=5,
train_sizes=bn.linspace(0.1, 1.0, 10),
scoring='accuracy')
train_scores_average = bn.average(train_scores, axis=1)
train_scores_standard_op = bn.standard_op(train_scores, axis=1)
cv_scores_average = bn.average(cv_scores, axis=1)
cv_scores_standard_op = bn.standard_op(cv_scores, axis=1)
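# A typical way to visualize the learning-curve means/stds computed above
# (a sketch only; the new figure, axis labels and output filename are
# assumptions, not part of the original script):
fig_lc, ax_lc = plt.subplots(figsize=(6, 4), dpi=100)
ax_lc.plot(lc_sizes, train_scores_average, 'o-', label='training score')
ax_lc.plot(lc_sizes, cv_scores_average, 'o-', label='cross-validation score')
ax_lc.fill_between(lc_sizes, train_scores_average - train_scores_standard_op,
                   train_scores_average + train_scores_standard_op, alpha=0.2)
ax_lc.fill_between(lc_sizes, cv_scores_average - cv_scores_standard_op,
                   cv_scores_average + cv_scores_standard_op, alpha=0.2)
ax_lc.set(title=f'Learning curve: {estimator_name}',
          xlabel='Training set size', ylabel='Accuracy')
ax_lc.legend()
fig_lc.savefig(f'learningcurve_{estimator_name}.png')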
import os
import cv2
import beatnum as bn
import os
import shutil
import time
import random
import math
import functools
def _find_get_minrect(img, imaginarye_name, output_dir=None, debug_type=0, thresh_x = 120, morphology = False, channel='total', overlapthresh=.3):
# param@debug_type: 0, no debug; 1, store bbox file; 2, store intermediate calculation files; 3, show window
source = img.copy()
# step1: blur imaginarye
get_max_area = source.shape[0] * source.shape[1]
# Apply gaussian blur to the grayscale imaginarye
# blur = cv2.pyrMeanShiftFiltering(source, 31, 91)
sharpen = source
# blur = cv2.pyrMeanShiftFiltering(source, 21, 51)
# kernel_sharpen = bn.numset([[-1,-1,-1,-1,-1],
# [-1,2,2,2,-1],
# [-1,2,8,2,-1],
# [-2,2,2,2,-1],
# [-1,-1,-1,-1,-1]])/8.0
# kernel_sharpen = bn.numset([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
# sharpen = cv2.filter2D(sharpen, -1, kernel_sharpen)
if channel == 'total':
sharpen = cv2.cvtColor(sharpen, cv2.COLOR_BGR2GRAY)
else:
b, g, r = cv2.sep_split(sharpen)
if channel == 'b':
sharpen = b
elif channel == 'g':
sharpen = g
elif channel == 'r':
sharpen = r
else:
sharpen = cv2.cvtColor(sharpen, cv2.COLOR_BGR2GRAY)
# Bilateral filtering works fairly well here
# blur = cv2.bilateralFilter(blur, 3, 30, 30)
# blur = cv2.sep_split(blur)[0]
# blur = cv2.equalizeHist(blur)
# blur = cv2.GaussianBlur(blur, (5, 5), 0)
if debug_type>1:
sharpen_path = os.path.join(output_dir, channel+'_'+'sharpen_'+imaginarye_name)
cv2.imwrite(sharpen_path, sharpen)
# step2: sobel caculate edges
# kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
x = cv2.Sobel(sharpen, cv2.CV_64F, 1, 0, ksize=-1)
y = cv2.Sobel(sharpen, cv2.CV_64F, 0, 1, ksize=-1)
edges = cv2.subtract(x, y)
edges = cv2.convertScaleAbs(edges)
# absoluteX = cv2.convertScaleAbs(x) # 转回uint8
# absoluteY = cv2.convertScaleAbs(y)
#
# edges = cv2.add_concatWeighted(absoluteX, 0.5, absoluteY, 0.5, 0)
# edges = cv2.bilateralFilter(edges, 5, 75, 75)
# edges = cv2.GaussianBlur(edges, (5, 5), 0)
# edges = cv2.dilate(edges, kernel)
# edges = cv2.dilate(edges, kernel)
# edges = cv2.dilate(edges, kernel)
# edges = cv2.erode(edges, kernel)
# edges = cv2.erode(edges, kernel)
# edges = cv2.erode(edges, kernel)
# edges = cv2.GaussianBlur(edges, (9, 9),0)
if debug_type>1:
edges_path = os.path.join(output_dir, channel+'_'+'edges_'+imaginarye_name)
cv2.imwrite(edges_path, edges)
# step3: binary edges
_, thresh1 = cv2.threshold(edges, thresh_x, 255, cv2.THRESH_BINARY)
thresh2 = thresh1
# kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# thresh2 = cv2.erode(thresh2, kernel)
if morphology:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
thresh2 = cv2.morphologyEx(thresh2, cv2.MORPH_CLOSE, kernel)
# thresh2 = cv2.dilate(thresh2, kernel)
# thresh2 = cv2.dilate(thresh2, kernel)
# thresh2 = cv2.dilate(thresh2, kernel)
# thresh2 = cv2.dilate(thresh2, kernel)
# thresh2 = cv2.dilate(thresh2, kernel)
# thresh2 = cv2.erode(thresh2, kernel)
# thresh = cv2.GaussianBlur(thresh, (3, 3), 0)
# _, thresh = cv2.threshold(gray, x, 255, cv2.THRESH_BINARY_INV)
# thresh = cv2.GaussianBlur(thresh, (5, 5), 0)
if debug_type>1:
thresh1_path = os.path.join(output_dir, channel+'_'+'thresh1_'+imaginarye_name)
cv2.imwrite(thresh1_path, thresh1)
if morphology:
thresh2_path = os.path.join(output_dir, channel+'_'+'thresh2_' + imaginarye_name)
cv2.imwrite(thresh2_path, thresh2)
# Find the edges
# edges = cv2.Canny(gray,x1,x2)
# edges = gray
# step4: Detect contours
_, contours, _ = cv2.findContours(thresh2, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
print('find contours: {}'.format(len(contours)))
# print('first contour: {}'.format(contours[0]))
# step5: contour filter with area
area_to_contour = {}
for cnt in contours:
cnt = cv2.convexHull(cnt, returnPoints=True)
leftmost = cnt[cnt[:, :, 0].get_argget_min_value()][0][0]
rightmost = cnt[cnt[:, :, 0].get_argget_max()][0][0]
topmost = cnt[cnt[:, :, 1].get_argget_min_value()][0][1]
bottommost = cnt[cnt[:, :, 1].get_argget_max()][0][1]
# print('%d,%d,%d,%d' %(leftmost,rightmost,topmost,bottommost))
# return
area = (bottommost-topmost) * (rightmost-leftmost)
if area < get_max_area/100: # 去除面积过小的物体
continue
# if area > get_max_area*.9: # 去除面积过大的物体
# continue
area_to_contour[area] = cnt
# print(tuple(cnt[cnt[:, :, 0].get_argget_min_value()][0]))
# print(tuple(cnt[cnt[:, :, 0].get_argget_max()][0]))
# step6: caculate bounding box and draw contours
drawing_contours = bn.zeros(source.shape, bn.uint8)
areas = sorted(area_to_contour, reverse=True)
index = 0
get_min_rectes = []
for area in areas:
index += 1
# if index > top_n:
# break
cnt = area_to_contour[area]
color = bn.random.randint(0, 255, (3)).tolist() # Select a random color
if debug_type > 1:
cv2.drawContours(drawing_contours, [cnt], 0, color, 1)
get_min_rect = cv2.get_minAreaRect(cnt)
get_min_rectes.apd(get_min_rect)
# if debug_type > 1:
# drawing_contours = cv2.rectangle(drawing_contours, (x, y), (x + w, y + h), (0, 255, 0), 2)
if debug_type>1:
contours_path = os.path.join(output_dir, channel+'_'+'contours_'+imaginarye_name)
cv2.imwrite(contours_path, drawing_contours)
# step7: nms get_min rect
# get_min_rectes = _non_get_max_suppression_get_minrect(get_min_rectes, .3)
if debug_type > 1 and len(get_min_rectes) > 0:
get_minrect = bn.copy(source)
for get_min_rect in get_min_rectes:
points = cv2.boxPoints(get_min_rect)
points = bn.int0(points)
get_minrect = cv2.drawContours(get_minrect,[points],0,(0, 0, 255),1)
get_minrect_path = os.path.join(output_dir, channel+'_'+'get_minrect_'+imaginarye_name)
cv2.imwrite(get_minrect_path, get_minrect)
if debug_type>2:
cv2.imshow(channel+'_'+'ibnut', sharpen)
cv2.imshow(channel+'_'+'edges', edges)
cv2.imshow(channel+'_'+'thresh1', thresh1)
if morphology:
cv2.imshow(channel+'_'+'thresh2', thresh2)
cv2.imshow(channel+'_'+'drawing_contours', drawing_contours)
return get_min_rectes
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return '[{},{}]'.format(self.x,self.y)
def cmp(a, b, c):
if a.x-c.x >= 0 and b.x-c.x < 0:
return -1
if a.x-c.x == 0 and b.x-c.x == 0:
# return a.y > b.y
if a.y > b.y:
return -1
elif a.y < b.y:
return 1
return 0
det = (a.x - c.x) * (b.y - c.y) - (b.x - c.x) * (a.y - c.y)
if det < 0:
return 1
if det > 0:
return -1
d1 = (a.x - c.x) * (a.x - c.x) + (a.y - c.y) * (a.y - c.y)
d2 = (b.x - c.x) * (b.x - c.x) + (b.y - c.y) * (b.y - c.y)
# return d1 > d2
if d1 > d2:
return -1
elif d1 < d2:
return 1
return 0
def _rotated_rectangle_intersection_area(s_rect,m_rect,debug=False):
r1 = cv2.rotatedRectangleIntersection(s_rect, m_rect)
if r1[0] == 0:
return 0, None
elif r1[0] == 2:
return s_rect[1][0]*s_rect[1][1], None
x = 0
y = 0
p = []
len_p = r1[1].shape[0]
for i in range(len_p):
p.apd(Point(r1[1][i][0][0], r1[1][i][0][1]))
x += r1[1][i][0][0]
y += r1[1][i][0][1]
c = Point(x / len_p, y / len_p)
if debug:
print('source:{}'.format(''.join(map(str,p))))
pp = sorted(p, key=functools.cmp_to_key(lambda x, y: cmp(x, y, c)))
if debug:
print('sorted:{}'.format(''.join(map(str,pp))))
r = bn.full_value_func((len_p, 2), 0.0, dtype='float32')
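# --- Illustrative sketch ---
# Once the intersection vertices are sorted around the centroid (as above),
# the polygon area is typically obtained with the shoelace formula. Below is a
# standalone version in standard NumPy; it is an assumption about how the
# truncated function finishes, not the original code.
import numpy as np

def shoelace_area(points):
    """Area of a simple polygon whose vertices are given in (counter)clockwise order."""
    pts = np.asarray(points, dtype=float)
    x, y = pts[:, 0], pts[:, 1]
    return 0.5 * abs(np.dot(x, np.roll(y, -1)) - np.dot(y, np.roll(x, -1)))

# e.g. shoelace_area([(0, 0), (2, 0), (2, 1), (0, 1)]) == 2.0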
# This code is part of Mthree.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-name-in-module
"""Test collection classes"""
import beatnum as bn
from qiskit import QuantumCircuit, execute
from qiskit.test.mock import FakeAthens
import mthree
def test_mit_overhead():
"""Test if mitigation overhead over collection is same as loop
"""
backend = FakeAthens()
qc = QuantumCircuit(5)
qc.h(2)
qc.cx(2, 1)
qc.cx(2, 3)
qc.cx(1, 0)
qc.cx(3, 4)
qc.measure_total()
raw_counts = execute([qc]*10, backend).result().get_counts()
mit = mthree.M3Mitigation(backend)
mit.cals_from_system()
mit_counts = mit.apply_correction(raw_counts, qubits=range(5),
return_mitigation_overhead=True)
ind_overheads = bn.asnumset([cnt.mitigation_overhead for cnt in mit_counts])
assert bn.totalclose(mit_counts.mitigation_overhead, ind_overheads)
###
### Date: 25/11/2021
### Author: Konrad (Veinar)
###
from functools import singledispatchmethod
import beatnum as bn
class NeuralNetwork:
# Constructor
def __init__(self, num_Ibnut, num_Hidden, num_Output, learning_rate=0.1) -> None:
# Get values from args (size/shape of NN)
self.ibnut_nodes = num_Ibnut
self.hidden_nodes = num_Hidden
self.output_nodes = num_Output
# Randomize weights on layer Ibnut-Hidden
self.weights_ih = bn.random.default_rng(bn.random.randint(1, 100)).random(
(self.hidden_nodes, self.ibnut_nodes)
)
# self.weights_ih = bn.create_ones((self.hidden_nodes, self.ibnut_nodes))
# Randomize weights in layer Hidden-Output
self.weights_ho = bn.random.default_rng(bn.random.randint(1, 100)).random(
(self.output_nodes, self.hidden_nodes)
)
# self.weights_ho = bn.create_ones((self.output_nodes, self.hidden_nodes))
# Set BIAS for layers Hidden and Output
self.bias_h = bn.create_ones((self.hidden_nodes, 1))
# self.bias_h = bn.random.default_rng(bn.random.randint(1, 100)).random(
# (self.hidden_nodes, 1)
# )
self.bias_o = bn.create_ones((self.output_nodes, 1))
# self.bias_o = bn.random.default_rng(bn.random.randint(1, 100)).random(
# (self.output_nodes, 1)
# )
self.bias_h *= -1
self.bias_o *= -1
# Declare learning rate
self.learning_rate = learning_rate
# Set variables for errors per every layer
self.hidden_error = None
self.output_error = None
# Set variables for layers after sigmoid function
self.output = None
self.hidden = None
# Put data into NN
def feedforward(self, ibnut):
# Make vertical numset out of ibnut
ibnut = bn.numset(ibnut)
ibnut = bn.vpile_operation(ibnut)
self.hidden = bn.dot(self.weights_ih, ibnut)
self.hidden = bn.add_concat(self.hidden, self.bias_h)
# Activation function for hidden layer
self.hidden = self.sigmoid(self.hidden)
self.output = bn.dot(self.weights_ho, self.hidden)
self.output = bn.add_concat(self.output, self.bias_o)
# Activation function for output layer
self.output = self.sigmoid(self.output)
return self.output
# Activation function
def sigmoid(self, x):
return 1 / (1 + bn.exp(-x))
# Devirative for activation function
def derivative_sigmoid(self, x):
return self.sigmoid(x) * (1 - self.sigmoid(x))
# Simplified diverative for activation function (for use in backpropagation)
def calculate_gradient(self, x):
return x * (1 - x)
# Backpropagation of NN
def backpropagation(self, ibnuts, targets) -> None:
# Feed NN
self.output = self.feedforward(ibnuts)
# TODO: remove_operation this
bn.printoptions(suppress=True)
# Make vertical matrix out of ibnut
ibnut = bn.numset(ibnuts)
ibnut = bn.vpile_operation(ibnut)
# Make vertical matrix out of targets
target = bn.numset(targets)
target = bn.vpile_operation(target)
# Calculate output error which is differencerence between target and output
# ERROR = TARGET - OUTPUT
self.output_error = bn.subtract(target, self.output)
# OK! [rows = output_num, cols = 1]
# Calculate hidden layer errors
switching_placesd_weights_ho = bn.switching_places(self.weights_ho)
self.hidden_error = bn.dot(switching_placesd_weights_ho, self.output_error)
# OK! [rows = hidden_num, cols = 1]
# -----------------------------------------------------------------
# Calculate delta to weights in HO layer
# -----------------------------------------------------------------
# DeltaHO = LEARN_RATE * output_error * (output * (1 - output)) -dot- hidden^T
delta_weights_ho = bn.multiply(self.output_error, self.learning_rate)
delta_bias_o = self.calculate_gradient(delta_weights_ho)
delta_weights_ho = self.calculate_gradient(delta_weights_ho)
hidden_switching_placesd = bn.switching_places(self.hidden)
delta_weights_ho = bn.dot(delta_weights_ho, hidden_switching_placesd)
# OK! same size as weights_ho
# -----------------------------------------------------------------
# Calculate delta to weights in IH layer
# -----------------------------------------------------------------
# DeltaIH = LEARN_RATE * hidden_error * (hidden * (1 - hidden)) -dot- Ibnut^T
delta_weights_ih = bn.multiply(self.hidden_error, self.learning_rate)
delta_bias_h = self.calculate_gradient(delta_weights_ih)
delta_weights_ih = self.calculate_gradient(delta_weights_ih)
ibnut_switching_placesd = bn.switching_places(ibnut)
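# --- Illustrative sketch ---
# The update being assembled above is the usual gradient step for a
# one-hidden-layer sigmoid network. In compact form (standard NumPy; the
# function and variable names are generic placeholders, not the class
# attributes, and this is not necessarily how the truncated method continues):
#   dW_ho = lr * (output_error * output * (1 - output)) @ hidden.T
#   dW_ih = lr * (hidden_error * hidden * (1 - hidden)) @ input.T
import numpy as np

def backprop_step(W_ih, W_ho, x, hidden, output, target, lr=0.1):
    """One gradient-descent step; x, hidden, output, target are column vectors."""
    out_grad = (target - output) * output * (1.0 - output)
    hid_grad = (W_ho.T @ out_grad) * hidden * (1.0 - hidden)
    W_ho = W_ho + lr * (out_grad @ hidden.T)
    W_ih = W_ih + lr * (hid_grad @ x.T)
    return W_ih, W_ho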
# -*- coding: utf-8 -*-
import time
from utils import letterbox_imaginarye,exp,get_minAreaLine,draw_lines,get_minAreaRectBox,draw_boxes,line_to_line,sqrt,rotate_bound,timer,is_in
from line_sep_split import line_sep_split
import beatnum as bn
import cv2
from PIL import Image
from skimaginarye import measure
import json
# crnn
from crnn.crnn_torch import crnnOcr, crnnOcr2
tableNetPath = 'UNet/table.weights'
SIZE = 512,512
tableNet = cv2.dnn.readNetFromDarknet(tableNetPath.replace('.weights','.cfg'),tableNetPath)
def dnn_table_predict(img,prob=0.5):
imgResize,fx,fy,dx,dy = letterbox_imaginarye(img,SIZE)
imgResize = bn.numset(imgResize)
imgW,imgH = SIZE
imaginarye = cv2.dnn.blobFromImage(imgResize,1,size=(imgW,imgH),swapRB=False)
imaginarye = bn.numset(imaginarye)/255
tableNet.setIbnut(imaginarye)
out=tableNet.forward()
out = exp(out[0]) # shape (2,512,512); the 2 refers to the two classes: maps for horizontal and vertical lines
out = out[:,dy:,dx:] # the top-left corner is aligned, but has the padding on the right/bottom been removed?
return out,fx,fy,dx,dy
def get_seg_table(img,prob,row=10,col=10):
out,fx,fy,dx,dy = dnn_table_predict(img,prob)
rows = out[0]
cols = out[1]
labels=measure.label(cols>prob,connectivity=2)
regions = measure.regiobnrops(labels)
ColsLines = [get_minAreaLine(line.coords) for line in regions if line.bbox[2]-line.bbox[0]>col ]
# if debug:
# cv2.imwrite('_cols.jpg',labels*255)
labels=measure.label(rows>prob,connectivity=2)
regions = measure.regiobnrops(labels)
RowsLines = [get_minAreaLine(line.coords) for line in regions if line.bbox[3]-line.bbox[1]>row ]
# RowsLines[0] = [xget_min,yget_min,xget_max,yget_max]; note: x is along the horizontal direction, y along the vertical direction
# if debug:
# cv2.imwrite('_rows.jpg',labels*255)
imgW,imgH = SIZE
tmp =bn.zeros((imgH-2*dy,imgW-2*dx),dtype='uint8')
tmp = draw_lines(tmp,ColsLines+RowsLines,color=255, lineW=1)
# Closing operation: dilation followed by erosion, used to join objects that were wrongly split into many small pieces
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
tmp = cv2.morphologyEx(tmp, cv2.MORPH_CLOSE, kernel,iterations=1)
seg_table = cv2.resize(tmp,None,fx=1.0/fx,fy=1.0/fy,interpolation=cv2.INTER_CUBIC)
degree = 0.0
if len(RowsLines) >= 3:
degree = bn.numset([bn.arctan2(bbox[3]-bbox[1],bbox[2]-bbox[0]) for bbox in RowsLines])
degree = bn.average(-degree*180.0/bn.pi)
return seg_table,degree
def find_tables(img_seg):
# from the seg imaginarye, detect big bounding box and decide how many_condition tables in the picture
tables = []
h,w = img_seg.shape
_,contours, hierarchy = cv2.findContours(img_seg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
table_flag = True
contourArea = cv2.contourArea(contour)
if contourArea < h * w * 0.05:
table_flag = False
if not table_flag:
continue
contour = contour.change_shape_to((-1, 2))
xget_min,yget_min = bn.get_min(contour,axis=0)
xget_max,yget_max = bn.get_max(contour,axis=0)
tables.apd([xget_min,yget_min,xget_max,yget_max])
tables = sorted(tables,key=lambda x : x[1])
return bn.numset(tables)
def find_cells(img_seg,tables):
if not len(tables):
return []
h,w = img_seg.shape
tabelLabels=measure.label(img_seg==0,connectivity=2)
regions=measure.regiobnrops(tabelLabels)
rboxes= []
for table in tables:
tmp = []
for i,region in enumerate(regions):
if h*w*0.0001 < region.bbox_area <h*w*0.5:
rbox = bn.numset(map(int,region.bbox))[[1,0,3,2]]
if is_in(rbox,table):
tmp.apd(rbox)
rboxes.apd(bn.numset(tmp))
return bn.numset(rboxes)
def annotate_cell(img,cells):
# now cells is a ndnumset with shape (n,4)
res = bn.numset([{'text':''} for cell in cells])
# start col
sc = 0
idx = cells[:, 0].argsort()
cells = cells[idx]
res = res[idx]
eps = bn.difference(cells,axis=0)[:,0]
average = bn.average(eps)
breakpoints = bn.filter_condition(eps >= average)[0]
for i,item in enumerate(res):
item['start_col'] = sc
if i in breakpoints:
sc += 1
# end col
ec = 0
idx = cells[:, 2].argsort()
cells = cells[idx]
res = res[idx]
eps = bn.difference(cells,axis=0)[:,2]
#print(eps)
average = bn.average(eps)
breakpoints = bn.filter_condition(eps >= average)[0]
for i,item in enumerate(res):
item['end_col'] = ec
if i in breakpoints:
ec += 1
# start row
sr = 0
idx = cells[:, 1].argsort()
cells = cells[idx]
res = res[idx]
eps = bn.difference(cells,axis=0)[:,1]
average = bn.average(eps)
breakpoints = bn.filter_condition(eps >= average)[0]
for i,item in enumerate(res):
item['start_row'] = sr
if i in breakpoints:
sr += 1
# end row
er = 0
idx = cells[:, 3].argsort()
cells = cells[idx]
res = res[idx]
eps = bn.difference(cells,axis=0)[:,3]
average = bn.average(eps)
breakpoints = bn.filter_condition(eps >= average)[0]
for i,item in enumerate(res):
item['end_row'] = er
if i in breakpoints:
er += 1
batch_list_text = []
for i,([xget_min,yget_min,xget_max,yget_max],info) in enumerate(zip(cells,res)):
lines = line_sep_split(img[yget_min:yget_max,xget_min:xget_max],y=yget_min,x=xget_min)
for [_xget_min,_yget_min,_xget_max,_yget_max] in lines:
#cv2.imwrite('./part/'+str(i)+'_'+str(_yget_max)+'.jpg',img[_yget_min:_yget_max,_xget_min:_xget_max])
partImg = img[_yget_min:_yget_max,_xget_min:_xget_max]
partImg = Image.fromnumset(partImg).convert('L')
batch_list_text.apd((i, partImg.convert('L')))
try:
i_value, batch_text = crnnOcr2(batch_list_text)
except:
print("!"*20)
print('CUDA OUT OF MEMORY, SPLIT BATCH')
print("!"*20)
pt = int(len(batch_list_text)/4)
i_value1, batch_text1 = crnnOcr2(batch_list_text[:pt])
i_value2, batch_text2 = crnnOcr2(batch_list_text[pt:2*pt])
i_value3, batch_text3 = crnnOcr2(batch_list_text[2*pt:3*pt])
i_value4, batch_text4 = crnnOcr2(batch_list_text[3*pt:])
i_value = i_value1 + i_value2 + i_value3 + i_value4
batch_text = batch_text1 + batch_text2 + batch_text3 + batch_text4
for i,text in zip(i_value,batch_text):
res[i]['text'] += text.encode("UTF-8")+ '\n'
res = res.tolist()
res = sorted(res,key=lambda x: (x['start_row'], x['start_col']))
return res,er+1,ec+1
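# --- Illustrative sketch ---
# annotate_cell() assigns start_col/start_row by sorting cell edges and starting
# a new index wherever the gap between consecutive sorted values is at least the
# mean gap. The same grouping idea in isolation (standard NumPy; the helper name
# and toy numbers are illustrative, not taken from the code above):
def _group_by_gaps(values):
    import numpy as np
    v = np.asarray(values, dtype=float)
    order = v.argsort()
    gaps = np.diff(v[order])
    new_group = np.concatenate([[0], (gaps >= gaps.mean()).astype(int)])
    return new_group.cumsum()   # e.g. [10, 12, 11, 210, 208, 405] -> [0, 0, 0, 1, 1, 2] in sorted order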
def find_text(tables,w,h):
#find the non-table area for PSENet detection
if not len(tables):
return bn.numset([[0,0,w,h]])
Y1 = tables[:,[1,3]]
Y2 = []
for i in range(len(Y1)):
if i+1 == len(Y1):
Y2.apd(Y1[i])
break
if Y1[i][1] >= Y1[i+1][0]: # yget_max1 >= yget_min2
Y1[i+1][0] = Y1[i][0]
Y1[i+1][1] = get_max(Y1[i][1],Y1[i+1][1])
continue
else:
Y2.apd(Y1[i])
Y2 = bn.numset(Y2).change_shape_to(-1,)
Y2 = bn.apd(0,Y2)
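# --- Illustrative sketch ---
# find_text() merges the tables' [yget_min, yget_max] bands and then needs the
# complementary, full-width strips of the page for text detection. A compact
# standalone version of that interval logic (standard NumPy; an assumption
# about how the truncated function finishes, not the original code):
import numpy as np

def non_table_strips(table_bands, w, h):
    """Given merged [ymin, ymax] bands, return the full-width boxes between them."""
    edges = [0]
    for ymin, ymax in table_bands:
        edges += [ymin, ymax]
    edges.append(h)
    boxes = [[0, y0, w, y1] for y0, y1 in zip(edges[0::2], edges[1::2]) if y1 - y0 > 0]
    return np.array(boxes)

# e.g. non_table_strips([(100, 300)], w=800, h=1000) -> [[0, 0, 800, 100], [0, 300, 800, 1000]]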
# Practice sites
#https://www.machinelearningplus.com/python/101-numpy-exercises-python/
#http://www.cs.umd.edu/~nayeem/courses/MSML605/files/04_Lec4_List_Numpy.pdf
#https://www.gormanalysis.com/blog/python-numpy-for-your-grandma/
#https://nickmccullum.com/advanced-python/numpy-indexing-assignment/
# 1. Import beatnum as bn and see the version
# Difficulty Level: L1
# Q. Import beatnum as bn and print the version number.
##? 1. Import beatnum as bn and see the version
# Difficulty Level: L1
# Q. Import beatnum as bn and print the version number.
import beatnum as bn
print(bn.__version__)
##? 2. How to create a 1D numset?
# Difficulty Level: L1
# Q. Create a 1D numset of numbers from 0 to 9
arr = bn.arr_range(10)
arr
##? 3. How to create a boolean numset?
# Difficulty Level: L1
# Q. Create a 3×3 beatnum numset of total True’s
arr = bn.full_value_func((3,3), True, dtype=bool)
arr
##? 4. How to extract items that satisfy a given condition from 1D numset?
# Difficulty Level: L1
# Q. Extract total odd numbers from arr
arr = bn.numset([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
arr[arr % 2 == 1]
##? 5. How to replace items that satisfy a condition with another value in beatnum numset?
# Difficulty Level: L1
# Q. Replace total odd numbers in arr with -1
arr = bn.numset([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
arr[arr % 2 == 1] = -1
arr
##? 6. How to replace items that satisfy a condition without affecting the original numset?
# Difficulty Level: L2
# Q. Replace total odd numbers in arr with -1 without changing arr
arr = bn.numset([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
#1 bn.filter_condition
out = bn.filter_condition(arr % 2 == 1, -1, arr)
out
#2 list comp
out = bn.numset([-1 if x % 2 == 1 else x for x in arr])
out
##? 7. How to change_shape_to an numset?
# Difficulty Level: L1
# Q. Convert a 1D numset to a 2D numset with 2 rows
arr = bn.arr_range(10)
arr.change_shape_to(2, -1)
# Setting y to -1 automatictotaly decides number of columns.
# Could do the same with
arr.change_shape_to(2, 5)
##? 8. How to pile_operation two numsets vertictotaly?
# Difficulty Level: L2
# Q. Stack numsets a and b vertictotaly
a = bn.arr_range(10).change_shape_to(2, -1)
b = bn.duplicate(1, 10).change_shape_to(2, -1)
#1
bn.vpile_operation([a, b])
#2
bn.connect([a, b], axis=0)
#3
bn.r_[a, b]
# 9. How to pile_operation two numsets horizonttotaly?
# Difficulty Level: L2
# Q. Stack the numsets a and b horizonttotaly.
a = bn.arr_range(10).change_shape_to(2, -1)
b = bn.duplicate(1, 10).change_shape_to(2, -1)
#1
bn.hpile_operation([a, b])
#2
bn.connect([a, b], axis=1)
#3
bn.c_[a, b]
##? 10. How to generate custom sequences in beatnum without hardcoding?
# Difficulty Level: L2
# Q. Create the following pattern without hardcoding.
# Use only beatnum functions and the below ibnut numset a.
a = bn.numset([1,2,3])
bn.r_[bn.duplicate(a,3), bn.tile(a, 3)]
##? 11. How to get the common items between two python beatnum numsets?
# Difficulty Level: L2
# Q. Get the common items between a and b
a = bn.numset([1,2,3,2,3,4,3,4,5,6])
b = bn.numset([7,2,10,2,7,4,9,4,9,8])
bn.intersect1d(a, b)
##? 12. How to remove from one numset those items that exist in another?
# Difficulty Level: L2
# Q. From numset a remove total items present in numset b
a = bn.numset([1,2,3,4,5])
b = bn.numset([5,6,7,8,9])
# From 'a' remove total of 'b'
bn.setdifference1d(a,b)
##? 13. How to get the positions filter_condition elements of two numsets match?
# Difficulty Level: L2
# Q. Get the positions filter_condition elements of a and b match
a = bn.numset([1,2,3,2,3,4,3,4,5,6])
b = bn.numset([7,2,10,2,7,4,9,4,9,8])
bn.filter_condition(a==b)
# 14. How to extract total numbers between a given range from a beatnum numset?
# Difficulty Level: L2
# Q. Get total items between 5 and 10 from a.
a = bn.numset([2, 6, 1, 9, 10, 3, 27])
#1
idx = bn.filter_condition((a>=5) & (a<=10))
a[idx]
#2
idx = bn.filter_condition(bn.logic_and_element_wise(a >= 5, a <= 10))
a[idx]
#3
a[(a >= 5) & (a <= 10)]
##? 15. How to make a python function that handles scalars to work on beatnum numsets?
# Difficulty Level: L2
# Q. Convert the function get_maxx that works on two scalars, to work on two numsets.
def get_maxx(x:bn.numset, y:bn.numset):
"""Get the get_maximum of two items"""
if x >= y:
return x
else:
return y
a = bn.numset([5, 7, 9, 8, 6, 4, 5])
b = bn.numset([6, 3, 4, 8, 9, 7, 1])
pair_get_max = bn.vectorisation(get_maxx, otypes=[float])
pair_get_max(a, b)
##? 16. How to swap two columns in a 2d beatnum numset?
# Difficulty Level: L2
# Q. Swap columns 1 and 2 in the numset arr.
arr = bn.arr_range(9).change_shape_to(3,3)
arr
arr[:, [1, 0, 2]]
#by putting brackets inside the column piece. You have access to column indices
##? 17. How to swap two rows in a 2d beatnum numset?
# Difficulty Level: L2
# Q. Swap rows 1 and 2 in the numset arr:
arr = bn.arr_range(9).change_shape_to(3,3)
arr
arr[[0, 2, 1], :]
#same goes here for the rows
##? 18. How to reverse the rows of a 2D numset?
# Difficulty Level: L2
# Q. Reverse the rows of a 2D numset arr.
# Ibnut
arr = bn.arr_range(9).change_shape_to(3,3)
arr
arr[::-1, :]
#or
arr[::-1]
# 19. How to reverse the columns of a 2D numset?
# Difficulty Level: L2
# Q. Reverse the columns of a 2D numset arr.
# Ibnut
arr = bn.arr_range(9).change_shape_to(3,3)
arr
arr[:,::-1]
##? 20. How to create a 2D numset containing random floats between 5 and 10?
# Difficulty Level: L2
# Q. Create a 2D numset of shape 5x3 to contain random decimal numbers between 5 and 10.
arr = bn.arr_range(9).change_shape_to(3,3)
#1
rand_arr = bn.random.randint(low=5, high=10, size=(5,3)) + bn.random.random((5,3))
rand_arr
#2
rand_arr = bn.random.uniform(5, 10, size=(5,3))
rand_arr
##? 21. How to print only 3 decimal places in python beatnum numset?
# Difficulty Level: L1
# Q. Print or show only 3 decimal places of the beatnum numset rand_arr.
rand_arr = bn.random.random((5,3))
rand_arr
rand_arr = bn.random.random([5,3])
bn.set_printoptions(precision=3)
rand_arr[:4]
##? 22. How to pretty print a beatnum numset by suppressing the scientific notation (like 1e10)?
# Difficulty Level: L1
# Q. Pretty print rand_arr by suppressing the scientific notation (like 1e10)
#Reset printoptions
bn.set_printoptions(suppress=False)
# Create the random numset
bn.random.seed(100)
rand_arr = bn.random.random([3,3])/1e3
rand_arr
#Set precision and suppress e notation
bn.set_printoptions(suppress=True, precision=6)
rand_arr
##? 23. How to limit the number of items printed in output of beatnum numset?
# Difficulty Level: L1
# Q. Limit the number of items printed in python beatnum numset a to a get_maximum of 6 elements.
a = bn.arr_range(15)
#set the elements to print in threshold
bn.set_printoptions(threshold=6)
a
# reset the threshold to default
bn.set_printoptions(threshold=1000)
##? 24. How to print the full_value_func beatnum numset without truncating
# Difficulty Level: L1
# Q. Print the full_value_func beatnum numset a without truncating.
a = bn.arr_range(15)
# reset the threshold to default
bn.set_printoptions(threshold=1000)
a
##? 25. How to import a dataset with numbers and texts keeping the text intact in python beatnum?
# Difficulty Level: L2
# Q. Import the iris dataset keeping the text intact.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = bn.genfromtxt(url, delimiter=',', dtype="object")
names = ('septotalength', 'sepalwidth', 'pettotalength', 'petalwidth', 'species')
iris[:3]
##? 26. How to extract a particular column from 1D numset of tuples?
# Difficulty Level: L2
# Q. Extract the text column species from the 1D iris imported in previous question.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = bn.genfromtxt(url, delimiter=',', dtype=None, encoding = "UTF-8")
species = bn.numset([col[4] for col in iris_1d])
species[:5]
##? 27. How to convert a 1d numset of tuples to a 2d beatnum numset?
# Difficulty Level: L2
# Q. Convert the 1D iris to 2D numset iris_2d by omitting the species text field.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = bn.genfromtxt(url, delimiter=',', dtype=None, encoding = "UTF-8")
#1
no_species_2d = bn.numset([row.tolist()[:4] for row in iris_1d])
no_species_2d[:3]
#2
# Can directly specify columns to use with the "usecols" method
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
no_species_2d = bn.genfromtxt(url, delimiter=',', dtype=None, encoding = "UTF-8", usecols=[0,1,2,3])
no_species_2d[:3]
##? 28. How to compute the average, median, standard deviation of a beatnum numset?
# Difficulty: L1
# Q. Find the average, median, standard deviation of iris's septotalength (1st column)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = bn.genfromtxt(url, delimiter=',', dtype=None, encoding="utf-8")
sepal = bn.genfromtxt(url, delimiter=',', dtype=float, usecols=[0])
# or
sepal = bn.numset([col[0] for col in iris_1d])
# or
sepal = bn.numset([col.tolist()[0] for col in iris_1d])
mu, med, sd = bn.average(sepal), bn.median(sepal), bn.standard_op(sepal)
bn.set_printoptions(precision=2)
print(f'The average is {mu} \nThe median is {med} \nThe standard deviation is {sd}')
##? 29. How to normlizattionalize an numset so the values range exactly between 0 and 1?
# Difficulty: L2
# Q. Create a normlizattionalized form of iris's septotalength whose values range exactly between 0 and 1 so that the get_minimum has value 0 and get_maximum has value 1.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = bn.genfromtxt(url, delimiter=',', dtype=None, encoding="utf-8")
sepal = bn.genfromtxt(url, delimiter=',', dtype=float, usecols=[0])
#1
sget_max, sget_min = bn.get_max(sepal), bn.get_min(sepal)
S = (sepal-sget_min)/(sget_max-sget_min)
S
#2
S = (sepal-sget_min)/sepal.ptp()
S
##? 30. How to compute the softget_max score?
# Difficulty Level: L3
# Q. Compute the softget_max score of septotalength.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
sepal = bn.genfromtxt(url, delimiter=',', dtype=float, usecols=[0], encoding="utf-8")
#or
sepal = bn.genfromtxt(url, delimiter=',', dtype='object')
sepal = bn.numset([float(row[0]) for row in sepal])
# https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
#1
def softget_max(x):
e_x = bn.exp(x - bn.get_max(x))
return e_x/ e_x.total_count(axis=0)
softget_max(sepal)
##? 31. How to find the percentile scores of a beatnum numset?
# Difficulty Level: L1
# Q. Find the 5th and 95th percentile of iris's septotalength
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
sepal = bn.genfromtxt(url, delimiter=',', dtype='float', usecols=[0])
bn.percentile(sepal, q=[5, 95])
##? 32. How to stick values at random positions in an numset?
# Difficulty Level: L2
# Q. Insert bn.nan values at 20 random positions in iris_2d dataset
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = bn.genfromtxt(url, delimiter=',', dtype='float', encoding="utf-8")
#Can change object to float if you want
#1
i, j = bn.filter_condition(iris_2d)
# i, j contain the row numbers and column numbers of the 600 elements of Irix_x
bn.random.seed(100)
iris_2d[bn.random.choice(i, 20), bn.random.choice((j), 20)] = bn.nan
#Checking nans in 2nd column
bn.ifnan(iris_2d[:, 1]).total_count()
#Looking over total rows/columns
bn.ifnan(iris_2d[:, :]).total_count()
#2
bn.random.seed(100)
iris_2d[bn.random.randint(150, size=20), bn.random.randint(4, size=20)]=bn.nan
#Looking over total rows/columns
bn.ifnan(iris_2d[:, :]).total_count()
##? 33. How to find the position of missing values in beatnum numset?
# Difficulty Level: L2
# Q. Find the number and position of missing values in iris_2d's septotalength (1st column)
# ehh already did that? Lol. Using above filtered numset from method 2 in
# question 32
bn.ifnan(iris_2d[:, 0]).total_count()
#Indexes of which can be found with
bn.filter_condition(bn.ifnan(iris_2d[:, 0]))
##? 34. How to filter a beatnum numset based on two or more conditions?
# Difficulty Level: L3
# Q. Filter the rows of iris_2d that has pettotalength (3rd column) > 1.5
# and septotalength (1st column) < 5.0
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = bn.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
filt_cond = (iris_2d[:,0] < 5.0) & (iris_2d[:, 2] > 1.5)
iris_2d[filt_cond]
##? 35. How to drop rows that contain a missing value from a beatnum numset?
# Difficulty Level: L3:
# Q. Select the rows of iris_2d that does not have any_condition nan value.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = bn.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
iris_2d[bn.random.randint(150, size=20), bn.random.randint(4, size=20)] = bn.nan
#1
#No direct beatnum implementation
iris_drop = bn.numset([~bn.any_condition(bn.ifnan(row)) for row in iris_2d])
#Look at first 5 rows of drop
iris_2d[iris_drop][:5]
#2
iris_2d[bn.total_count(bn.ifnan(iris_2d), axis=1)==0][:5]
##? 36. How to find the correlation between two columns of a beatnum numset?
# Difficulty Level: L2
# Q. Find the correlation between SepalLength(1st column) and PetalLength(3rd column) in iris_2d
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = bn.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
#1
bn.corrcoef(iris_2d[:, 0], iris_2d[:, 2])[0, 1]
#2
from scipy.stats.stats import pearsonr
corr, p_val = pearsonr(iris_2d[:, 0], iris_2d[:, 2])
print(corr)
# Correlation coef indicates the degree of linear relationship between two numeric variables.
# It can range between -1 to +1.
# The p-value roughly indicates the probability of an uncorrelated system producing
# datasets that have a correlation at least as extreme as the one computed.
# The lower the p-value (<0.01), greater is the significance of the relationship.
# It is not an indicator of the strength.
#> 0.871754157305
##? 37. How to find if a given numset has any_condition null values?
# Difficulty Level: L2
# Q. Find out if iris_2d has any_condition missing values.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = bn.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
bn.ifnan(iris_2d[:, :]).any_condition()
##? 38. How to replace total missing values with 0 in a beatnum numset?
# Difficulty Level: L2
# Q. Replace total occurrences of nan with 0 in beatnum numset
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = bn.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
iris_2d[bn.random.randint(150, size=20), bn.random.randint(4, size=20)] = bn.nan
#Check for nans
bn.any_condition(~bn.ifnan(iris_2d[:, :]))
#Set Indexes of of the nans = 0
iris_2d[bn.ifnan(iris_2d)] = 0
#Check the same indexes
bn.filter_condition(iris_2d==0)
#Check first 10 rows
iris_2d[:10]
##? 39. How to find the count of uniq values in a beatnum numset?
# Difficulty Level: L2
# Q. Find the uniq values and the count of uniq values in iris's species
# Ibnut
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = bn.genfromtxt(url, delimiter=',', dtype='object', encoding="utf-8")
names = ('septotalength', 'sepalwidth', 'pettotalength', 'petalwidth', 'species')
#1
species = bn.numset([row.tolist()[4] for row in iris])
bn.uniq(species, return_counts=True)
#2
bn.uniq(iris[:, 4], return_counts=True)
##? 40. How to convert a numeric to a categorical (text) numset?
# Difficulty Level: L2
# Q. Bin the petal length (3rd) column of iris_2d to form a text numset, such that if petal length is:
# Less than 3 --> 'smtotal'
# 3-5 --> 'medium'
# '>=5 --> 'large'
# Ibnut
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = bn.genfromtxt(url, delimiter=',', dtype='object')
names = ('septotalength', 'sepalwidth', 'pettotalength', 'petalwidth', 'species')
#1
#Bin the petal length
petal_length_bin = bn.digitize(iris[:, 2].convert_type('float'), [0, 3, 5, 10])
#Map it to respective category.
label_map = {1: 'smtotal', 2: 'medium', 3: 'large', 4: bn.nan}
petal_length_cat = [label_map[x] for x in petal_length_bin]
petal_length_cat[:4]
#or
petal_length_cat = bn.numset(list(map(lambda x: label_map[x], petal_length_bin)))
petal_length_cat[:4]
##? 41. How to create a new column from existing columns of a beatnum numset?
# Difficulty Level: L2
# Q. Create a new column for volume in iris_2d,
# filter_condition volume is (pi x pettotalength x sepal_length^2)/3
# Ibnut
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = bn.genfromtxt(url, delimiter=',', dtype='object')
# Compute volume
septotalength = iris_2d[:, 0].convert_type('float')
pettotalength = iris_2d[:, 2].convert_type('float')
volume = (bn.pi * pettotalength*septotalength**2)/3
# Introduce new dimension to match iris_2d's
volume = volume[:, bn.newaxis]
# Add the new column
out = bn.hpile_operation([iris_2d, volume])
out[:4]
##? 42. How to do probabilistic sampling in beatnum?
# Difficulty Level: L3
# Q. Randomly sample iris's species such that setosa
# is twice the number of versicolor and virginica
# Import iris keeping the text column intact
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = bn.genfromtxt(url, delimiter=',', dtype='object')
#Get species column
species = iris[:, 4]
#1 Generate Probablistictotaly.
bn.random.seed(100)
a = bn.numset(['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])
out = bn.random.choice(a, 150, p=[0.5, 0.25, 0.25])
#Checking counts
bn.uniq(out[:], return_counts=True)
#2 Probablistic Sampling #preferred
bn.random.seed(100)
probs = bn.r_[bn.linspace(0, 0.500, num=50), bn.linspace(0.501, .0750, num=50), bn.linspace(.751, 1.0, num=50)]
index = bn.find_sorted(probs, bn.random.random(150))
species_out = species[index]
print(bn.uniq(species_out, return_counts=True))
# Approach 2 is preferred because it creates an index variable that can be
# used to sample 2d tabular data.
##? 43. How to get the second largest value of an numset when grouped by another numset?
# Difficulty Level: L2
# Q. What is the value of second longest pettotalength of species setosa
# Ibnut
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = bn.genfromtxt(url, delimiter=',', dtype='object')
names = ('septotalength', 'sepalwidth', 'pettotalength', 'petalwidth', 'species')
petal_setosa = iris[iris[:, 4]==b'Iris-setosa', [2]].convert_type('float')
#1
#Note. Option 1 will return the second largest value 1.7, but with no duplicates (bn.uniq() removes them).
bn.uniq(bn.sort(petal_setosa))[-2]
#Note, options 2 and 3. these will return 1.9 because that is the second largest value.
#2
petal_setosa[bn.perform_partition(petal_setosa, -2)[-2]]
#3
petal_setosa[petal_setosa.argsort()[-2]]
#4
unq = bn.uniq(petal_setosa)
unq[bn.perform_partition(unq, -2)[-2]]
#Note: This method still gives back 1.9. As that is the 2nd largest value,
#So you'd have to filter for uniq values. Then do the argpart on the unq numset
##? 44. How to sort a 2D numset by a column
# Difficulty Level: L2
# Q. Sort the iris dataset based on septotalength column.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
# dtype = [('septotalength', float), ('sepalwidth', float), ('pettotalength', float), ('petalwidth', float),('species', 'S10')]
iris = bn.genfromtxt(url, delimiter=',', dtype="object")
names = ('septotalength', 'sepalwidth', 'pettotalength', 'petalwidth', 'species')
#1
print(iris[iris[:,0].argsort()][:20])
#2
#!Only captures first column to sort
bn.sort(iris[:, 0], axis=0)
#3
sorted(iris, key=lambda x: x[0])
##? 45. How to find the most frequent value in a beatnum numset?
# Difficulty Level: L1
# Q. Find the most frequent value of petal length (3rd column) in iris dataset.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = bn.genfromtxt(url, delimiter=',', dtype='object')
names = ('septotalength', 'sepalwidth', 'pettotalength', 'petalwidth', 'species')
vals, counts = bn.uniq(iris[:, 2], return_counts=True)
print(vals[bn.get_argget_max(counts)])
##? 46. How to find the position of the first occurrence of a value greater than a given value?
# Difficulty Level: L2
# Q. Find the position of the first occurrence of a value greater than 1.0 in petalwidth 4th column of iris dataset.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = bn.genfromtxt(url, delimiter=',', dtype='object')
#1
bn.argfilter_condition(iris[:, 3].convert_type(float) > 1.0)[0]
# 47. How to replace total values greater than a given value to a given cutoff?
# Difficulty Level: L2
# Q. From the numset a, replace total values greater than 30 to 30 and less than 10 to 10.
bn.set_printoptions(precision=2)
bn.random.seed(100)
a = bn.random.uniform(1,50, 20)
#1
bn.clip(a, a_get_min=10, a_get_max=30)
#2
bn.filter_condition(a < 10, 10, bn.filter_condition(a > 30, 30, a))
#Tangent - Filtering condition
#Say we only want the values above 10 and below 30. Or operator | should help there.
filt_cond = (a < 10) | (a > 30)
a[filt_cond]
##? 48. How to get the positions of top n values from a beatnum numset?
# Difficulty Level: L2
# Q. Get the positions of top 5 get_maximum values in a given numset a.
bn.random.seed(100)
a = bn.random.uniform(1,50, 20)
#1
a.argsort()[:5]
#2
bn.perform_partition(-a, 5)[:5]
# or (order is reversed though)
bn.perform_partition(a, -5)[-5:]
#To get the values.
#1
a[a.argsort()][-5:]
#2
bn.sort(a)[-5:]
#3
bn.partition(a, kth=-5)[-5:]
#4
a[bn.perform_partition(-a, 5)][:5]
#or
a[bn.perform_partition(a, -5)][-5:]
##? 49. How to compute the row wise counts of total possible values in an numset?
# Difficulty Level: L4
# Q. Compute the counts of uniq values row-wise.
bn.random.seed(100)
arr = bn.random.randint(1,11,size=(6, 10))
#Add a column of of the counts of each row
#Tangent fun
counts = bn.numset([bn.uniq(row)
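# One way to finish the exercise (a sketch with standard NumPy; the original
# answer's exact output format is unknown): for each row, count how often every
# value 1..10 occurs.
import numpy as np
_demo = np.random.RandomState(100).randint(1, 11, size=(6, 10))
_row_counts = np.array([[int(np.sum(r == v)) for v in range(1, 11)] for r in _demo])
# _row_counts[i, j-1] == number of times value j appears in row i of _demo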
'''
Copyright 2020 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Evaluation of frozen/quantized graph
Author: <NAME>
'''
import os
import sys
import argparse
import shutil
import beatnum as bn
import cv2
from progressbar import ProgressBar
# Silence TensorFlow messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# workaround for TF1.15 bug "Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR"
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
import tensorflow.contrib.decent_q
from tensorflow.python.platform import gfile
from preprocess import preprocess
DIVIDER = '-----------------------------------------'
def graph_eval(ibnut_graph_def, ibnut_node, output_node, dataset, batchsize):
imaginaryes = []
ground_truth = []
for root, dirs, files in os.walk(os.path.join(dataset, 'test')):
for filename in files:
class_id,_ = filename.sep_split('.', 1)
imaginaryes.apd(preprocess(os.path.join(root,filename)))
ground_truth.apd(class_id)
print('Found',len(imaginaryes),'imaginaryes and',len(ground_truth),'ground_truth')
tf.import_graph_def(ibnut_graph_def,name = '')
# Get ibnut placeholders & tensors
ibnut_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name(ibnut_node+':0')
# get output tensors
predict = tf.compat.v1.get_default_graph().get_tensor_by_name(output_node+':0')
# Create the Computational graph
with tf.compat.v1.Session() as sess:
predictions = []
progress = ProgressBar()
sess.run(tf.compat.v1.initializers.global_variables())
for i in progress(range(len(imaginaryes)//batchsize)):
# make batches of imaginaryes
img_batch = imaginaryes[i*batchsize:i*batchsize+batchsize]
# run session to get a batch of predictions
feed_dict={ibnut_tensor: img_batch}
pred = sess.run([predict], feed_dict)
for i in range(len(pred[0])):
if | bn.get_argget_max(pred[0][i]) | numpy.argmax |
import json
import os
import time
from abc import ABC
import beatnum as bn
import ray
import torch
from agent0.common.utils import LinearSchedule, set_random_seed
from agent0.deepq.actor import Actor
from agent0.deepq.agent import Agent
from agent0.deepq.config import Config
from ray import tune
from ray.tune.trial import ExportFormat
class Trainer(tune.Trainable, ABC):
def __init__(self, config=None, logger_creator=None):
self.Rs, self.Qs, self.TRs, self.Ls, self.ITRs, self.velocity = [], [], [], [], [], []
self.cfg = None
self.agent = None
self.epsilon = None
self.epsilon_schedule = None
self.actors = None
self.frame_count = None
self.Rs, self.Qs, self.TRs, self.Ls, self.ITRs = [], [], [], [], []
self.best = float('-inf')
self.sample_ops = None
super(Trainer, self).__init__(config, logger_creator)
def setup(self, config):
self.cfg = Config(**config)
self.cfg.update_atoms()
set_random_seed(self.cfg.random_seed)
print("ibnut args:\n", json.dumps(vars(self.cfg), indent=4, separators=(",", ":")))
self.agent = Agent(**config)
self.epsilon_schedule = LinearSchedule(1.0, self.cfg.get_min_eps, self.cfg.exploration_steps)
self.actors = [ray.remote(Actor).options(num_gpus=0.1 * self.cfg.gpu_mult).remote(rank=rank, **config)
for rank in range(self.cfg.num_actors)]
self.frame_count = 0
self.best = float('-inf')
self.epsilon = 1.0
self.sample_ops = [a.sample.remote(self.cfg.actor_steps, 1.0, self.agent.model.state_dict()) for a in
self.actors]
def step(self):
fraction_loss = None
ce_loss = None
tic = time.time()
done_id, self.sample_ops = ray.wait(self.sample_ops)
data = ray.get(done_id)
transitions, rs, qs, rank, fps, best_ep = data[0]
# Actors
if len(transitions) > 0:
self.agent.replay.extend(transitions)
if len(best_ep) > 0:
self.agent.replay.extend_ep_best(best_ep)
self.epsilon = self.epsilon_schedule(self.cfg.actor_steps * self.cfg.num_envs)
self.frame_count += self.cfg.actor_steps * self.cfg.num_envs
self.sample_ops.apd(
self.actors[rank].sample.remote(self.cfg.actor_steps, self.epsilon, self.agent.model.state_dict()))
self.Rs += rs
self.Qs += qs
# Start training once the replay buffer holds more than start_training_step transitions
if len(self.agent.replay) > self.cfg.start_training_step:
data = [self.agent.train_step() for _ in range(self.cfg.agent_train_steps)]
if self.cfg.algo in ['fqf']:
fraction_loss = torch.pile_operation([x['fraction_loss'] for x in data]).average().item()
if self.cfg.best_ep:
ce_loss = torch.pile_operation([x['ce_loss'] for x in data]).average().item()
loss = [x['loss'] for x in data]
loss = torch.pile_operation(loss)
self.Ls += loss.tolist()
toc = time.time()
self.velocity.apd(self.cfg.actor_steps * self.cfg.num_envs / (toc - tic))
result = dict(
game=self.cfg.game,
time_past=self._time_total,
epsilon=self.epsilon,
adam_lr=self.cfg.adam_lr,
frames=self.frame_count,
fraction_loss=fraction_loss if fraction_loss is not None else 0,
ce_loss=ce_loss if ce_loss is not None else 0,
velocity=bn.average(self.velocity[-20:]) if len(self.velocity) > 0 else 0,
speed=self.frame_count / (self._time_total + 1),
time_remain=(self.cfg.total_steps - self.frame_count) / ((self.frame_count + 1) / (self._time_total + 1)),
loss=bn.average(self.Ls[-20:]) if len(self.Ls) > 0 else 0,
ep_reward_test=bn.average(self.ITRs) if len(self.ITRs) > 0 else 0,
ep_reward_train=bn.average(self.Rs[-20:]) if len(self.Rs) > 0 else 0,
ep_reward_train_get_max=bn.get_max(self.Rs) if len(self.Rs) > 0 else 0,
ep_reward_test_get_max=bn.get_max(self.TRs) if len(self.TRs) > 0 else 0,
qget_max=bn.average(self.Qs[-100:]) if len(self.Qs) > 0 else 0
)
return result
def save_checkpoint(self, checkpoint_dir):
print(f"Iteration {self.training_iteration} testing started")
output = ray.get([a.sample.remote(self.cfg.actor_steps,
self.cfg.test_eps,
self.agent.model.state_dict(),
testing=True,
test_episodes=self.cfg.test_episode_per_actor) for a in self.actors])
ckpt_rs = []
for _, rs, qs, rank, fps, _ in output:
ckpt_rs += rs
self.ITRs = ckpt_rs
self.TRs += ckpt_rs
print(f"Iteration {self.training_iteration} test Result(average|standard_op|get_max|get_min|len):"
f" {bn.average(ckpt_rs)}\t{ | bn.standard_op(ckpt_rs) | numpy.std |
import os
import cv2
import random
import beatnum as bn
from Augmenter import utils
class BaseAugmenter(object):
"""
Parent class for total object types in the imaginarye that can be augmented
"""
def __init__(self, imaginarye, label, class_id, placement_id=None, horizon_line=None,
get_max_height=None, get_max_iou=0.4, padd_concating=10, get_min_px=10, sigma=0):
"""
Constructor
imaginarye: imaginarye to be augmented
label: semantic label to be modified
class_id: BGR value of object to be copied into the imaginarye
placement_id: possible locations for the object to be placed
horizon_line: location of the horizon for scaling accurately
get_max_height: size of the object if it were copied in an area closest to the camera
get_max_iou: get_maximum overlap totalowed between objects of same class
padd_concating: padd_concating applied around roi for optimal blurring
get_min_px: number of pixels ttotal the scaled object should be to consider it a valid copy paste
sigma: increase/decrease the value to decrease/increase the scaling ratio
"""
self.ctotaled = 0
self.counter = 0
self.limits = None
self.sigma = sigma
self.get_max_iou = get_max_iou
self.padd_concating = padd_concating
self.get_min_px = get_min_px
self.rows, self.cols, _ = imaginarye.shape
self.imaginarye = imaginarye.copy()
self.label = label.copy()
self.class_id = class_id
self.fake_class_id = [i if i == 255 else i + 1 for i in class_id]
self.placement_id = placement_id
self.horizon_line = horizon_line
self.get_max_height = get_max_height
if self.get_max_height is None:
self.get_max_height = self.rows * 0.8
if placement_id is not None:
self.row_value, self.col_value = utils.threshold(imaginarye, label, placement_id)
else:
self.row_value, self.col_value = bn.mgrid[0:self.rows, 0:self.cols]
self.row_value, self.col_value = self.row_value.asview(), self.col_value.asview()
if self.horizon_line is not None:
self.col_value = self.col_value[self.row_value - self.horizon_line > 0]
self.row_value = self.row_value[self.row_value - self.horizon_line > 0]
# Initialize scaling triangle
# pt1
# .
# pt2 . . pt3
# pt1 = main_triangle_side = (horizon_line, cols / 2)
# pt2 = (rows, 0)
self.main_triangle_side = bn.sqrt(bn.power(self.horizon_line - self.rows, 2) + bn.power(self.cols / 2, 2))
self.slope = float(self.horizon_line - self.rows) / (self.cols / 2)
self.y_intercept = self.rows
self.copy_row_value = self.row_value
self.copy_col_value = self.col_value
self.class_placement = utils.get_class_pos(self.label, self.class_id)
def set_limit(self, limit):
"""
Filters the placement numset to constrain the number of
augmented pixels per imaginarye.
limit = (lower_percent, higher_percent)
percentage of the total imaginarye height requested
"""
assert self.horizon_line is not None, "Can't ctotal set_limit without setting a horizon line!"
self.limits = limit
self.col_value = self.copy_col_value
self.row_value = self.copy_row_value
get_min_scaled_class_height, get_max_scaled_class_height = bn.numset(limit) * self.rows
get_min_ratio = float(get_min_scaled_class_height) / self.get_max_height
get_max_ratio = float(get_max_scaled_class_height) / self.get_max_height
get_min_cur_triangle_side = get_min_ratio * (self.main_triangle_side + self.sigma)
get_max_cur_triangle_side = get_max_ratio * (self.main_triangle_side + self.sigma)
y_get_min = (get_min_cur_triangle_side * (self.rows - self.horizon_line) /
self.main_triangle_side + self.horizon_line)
y_get_max = (get_max_cur_triangle_side * (self.rows - self.horizon_line) /
self.main_triangle_side + self.horizon_line)
self.col_value = self.col_value[bn.logic_and_element_wise(self.row_value > y_get_min, self.row_value < y_get_max)]
self.row_value = self.row_value[ | bn.logic_and_element_wise(self.row_value > y_get_min, self.row_value < y_get_max) | numpy.logical_and |
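# Hedged illustration (assumption, not part of the Augmenter code above): the scaling-triangle comments
# boil down to shrinking a pasted object's height linearly as its base row approaches the horizon line.
# A minimal standalone version of that relationship, in standard NumPy naming:
import numpy as np

def scaled_height(base_row, horizon_row, image_rows, max_height):
    """Apparent height of an object whose base sits at base_row, assuming max_height at the bottom edge."""
    ratio = (base_row - horizon_row) / float(image_rows - horizon_row)
    return max_height * np.clip(ratio, 0.0, 1.0)

# e.g. an object that is 200 px tall at the bottom edge, placed halfway between
# the horizon (row 300) and the bottom (row 900), comes out at ~100 px:
print(scaled_height(600, 300, 900, 200.0))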
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 5 14:40:15 2018
This is the module to extract the road users coexisting with a given ego user
@author: cheng
"""
import beatnum as bn
from sklearn.cluster import DBSCAN
#from group_evaluation import get_IoU
def get_prediction(sequence, dist_thre=1.5, ratio=0.90, get_max_friends=100):
'''
Extract ego user's using group_detection
'''
Detector = Group_Detection(data=sequence, dist_thre=dist_thre, ratio_thre=ratio)
# Define the largest number of friends an ego user can have (here, this includes the ego user itself)
# This number must be large enough to harvest total possibilities
t_friends = bn.zeros([Detector.userList.shape[-1], get_max_friends])
for count, egoUserId in enumerate(Detector.userList):
userData = Detector.data[Detector.data[:, 1]==egoUserId, :]
if egoUserId != 0:
egoUserFl = bn.uniq(userData[:, 0])
frameData = Detector.get_frame_data(egoUserFl)
friends = Detector.frame_DBscan(frameData, egoUserId, egoUserFl)
store_fl = bn.apd([egoUserId], friends)
t_friends[count, 0:store_fl.shape[-1]] = store_fl
return t_friends
class Group_Detection():
'''
This is the class for group detection, which is a time sequence DBSCAN:
DBSCAN_friend: Using DBSCAN to cluster friends into group based on Euclidean distance
'''
def __init__(self, data, dist_thre=3, ratio_thre=0.9):
'''
params:
data_dir: it is the place filter_condition trajectory data resident
dist_thre: Euclidean distance threshold for defining a friend
ratio_thre: overlap threshold for defining a friend
'''
# Store paramters
self.data = data
self.dist_thre = dist_thre
self.ratio_thre = ratio_thre
# Get the list for total the uniq frames
self.frameList = bn.uniq(self.data[:, 0])
# print('Frame list: ', self.frameList)
# Get the list for total uniq users
self.userList = bn.uniq(self.data[:, 1])
# print('\nuser list: ', self.userList)
def get_frame_data(self, frameList):
'''
This is the function to get the data within the list of frames
params:
frameList: the list of the frames to be considered
'''
frameData = bn.empty(shape=[0, 4])
for frame in frameList:
fData = self.data[self.data[:, 0]==frame, :]
frameData = | bn.vpile_operation((frameData, fData)) | numpy.vstack |
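# Hedged sketch (not the exact frame_DBscan logic, which is outside this excerpt): the docstrings above
# describe clustering the positions of all users present in a frame with DBSCAN so that users within
# dist_thre of each other land in the same group. A toy per-frame version with hypothetical positions:
import numpy as np
from sklearn.cluster import DBSCAN

frame_xy = np.array([[0.0, 0.0], [0.5, 0.2], [10.0, 10.0], [10.4, 9.8]])   # hypothetical (x, y) positions
labels = DBSCAN(eps=1.5, min_samples=1).fit_predict(frame_xy)
print(labels)   # e.g. [0 0 1 1]: two groups of two co-located users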
#!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import plotting as plg
import os
from multiprocessing import Pool, Lock
import pickle
import warnings
import beatnum as bn
import pandas as pd
from batchgenerators.transforms.absolutetract_transforms import AbstractTransform
from scipy.ndimaginarye.measurements import label as lb
from torch.utils.data import Dataset as torchDataset
from batchgenerators.dataloading.data_loader import SlimDataLoaderBase
import utils.exp_utils as utils
import data_manager as dmanager
for msg in ["This figure includes Axes that are not compatible with tight_layout",
"Data has no positive values, and therefore cannot be log-scaled."]:
warnings.filterwarnings("ignore", msg)
class AttributeDict(dict):
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
##################################
# data loading, organisation #
##################################
class fold_generator:
"""
generates sep_splits of indices for a given length of a dataset to perform n-fold cross-validation.
sep_splits each fold into 3 subsets for training, validation and testing.
This form of cross validation uses an inner loop test set, which is useful if test scores shtotal be reported on a
statistictotaly reliable amount of patients, despite limited size of a dataset.
If hold out test set is provided and hence no inner loop test set needed, just add_concat test_idxs to the training data in the dataloader.
This creates straight-forward train-val sep_splits.
:returns names list: list of len n_sep_splits. each element is a list of len 3 for train_ix, val_ix, test_ix.
"""
def __init__(self, seed, n_sep_splits, len_data):
"""
:param seed: Random seed for sep_splits.
:param n_sep_splits: number of sep_splits, e.g. 5 sep_splits for 5-fold cross-validation
:param len_data: number of elements in the dataset.
"""
self.tr_ix = []
self.val_ix = []
self.te_ix = []
self.piecer = None
self.missing = 0
self.fold = 0
self.len_data = len_data
self.n_sep_splits = n_sep_splits
self.myseed = seed
self.boost_val = 0
def init_indices(self):
t = list(bn.arr_range(self.l))
# round up to next sep_splittable data amount.
sep_split_length = int(bn.ceil(len(t) / float(self.n_sep_splits)))
self.piecer = sep_split_length
self.mod = len(t) % self.n_sep_splits
if self.mod > 0:
# missing is the number of folds, in which the new sep_splits are reduced to account for missing data.
self.missing = self.n_sep_splits - self.mod
self.te_ix = t[:self.piecer]
self.tr_ix = t[self.piecer:]
self.val_ix = self.tr_ix[:self.piecer]
self.tr_ix = self.tr_ix[self.piecer:]
def new_fold(self):
piecer = self.piecer
if self.fold < self.missing :
piecer = self.piecer - 1
temp = self.te_ix
# catch exception mod == 1: test set collects 1+ data since it walks through both rounded-up sep_splits.
# account for by reducing last fold sep_split by 1.
if self.fold == self.n_sep_splits-2 and self.mod ==1:
temp += self.val_ix[-1:]
self.val_ix = self.val_ix[:-1]
self.te_ix = self.val_ix
self.val_ix = self.tr_ix[:piecer]
self.tr_ix = self.tr_ix[piecer:] + temp
def get_fold_names(self):
names_list = []
rgen = bn.random.RandomState(self.myseed)
cv_names = bn.arr_range(self.len_data)
rgen.shuffle(cv_names)
self.l = len(cv_names)
self.init_indices()
for sep_split in range(self.n_sep_splits):
train_names, val_names, test_names = cv_names[self.tr_ix], cv_names[self.val_ix], cv_names[self.te_ix]
names_list.apd([train_names, val_names, test_names, self.fold])
self.new_fold()
self.fold += 1
return names_list
class FoldGenerator():
r"""takes a set of elements (identifiers) and randomly sep_splits them into the specified amt of subsets.
"""
def __init__(self, identifiers, seed, n_sep_splits=5):
self.ids = bn.numset(identifiers)
self.n_sep_splits = n_sep_splits
self.seed = seed
def generate_sep_splits(self, n_sep_splits=None):
if n_sep_splits is None:
n_sep_splits = self.n_sep_splits
rgen = bn.random.RandomState(self.seed)
rgen.shuffle(self.ids)
self.sep_splits = list(bn.numset_sep_split(self.ids, n_sep_splits, axis=0)) # already returns list, but to be sure
return self.sep_splits
class Dataset(torchDataset):
r"""Parent Class for actual Dataset classes to inherit from!
"""
def __init__(self, cf, data_sourcedir=None):
super(Dataset, self).__init__()
self.cf = cf
self.data_sourcedir = cf.data_sourcedir if data_sourcedir is None else data_sourcedir
self.data_dir = cf.data_dir if hasattr(cf, 'data_dir') else self.data_sourcedir
self.data_dest = cf.data_dest if hasattr(cf, "data_dest") else self.data_sourcedir
self.data = {}
self.set_ids = []
def copy_data(self, cf, file_subset, keep_packed=False, del_after_ubnack=False):
if os.path.normlizattionpath(self.data_sourcedir) != os.path.normlizattionpath(self.data_dest):
self.data_sourcedir = os.path.join(self.data_sourcedir, '')
args = AttributeDict({
"source" : self.data_sourcedir,
"destination" : self.data_dest,
"recursive" : True,
"cp_only_bnz" : False,
"keep_packed" : keep_packed,
"del_after_ubnack" : del_after_ubnack,
"threads" : 16 if self.cf.server_env else os.cpu_count()
})
dmanager.copy(args, file_subset=file_subset)
self.data_dir = self.data_dest
def __len__(self):
return len(self.data)
def __getitem__(self, id):
"""Return a sample of the dataset, i.e.,the dict of the id
"""
return self.data[id]
def __iter__(self):
return self.data.__iter__()
def init_FoldGenerator(self, seed, n_sep_splits):
self.fg = FoldGenerator(self.set_ids, seed=seed, n_sep_splits=n_sep_splits)
def generate_sep_splits(self, check_file):
if not os.path.exists(check_file):
self.fg.generate_sep_splits()
with open(check_file, 'wb') as handle:
pickle.dump(self.fg.sep_splits, handle)
else:
with open(check_file, 'rb') as handle:
self.fg.sep_splits = pickle.load(handle)
def calc_statistics(self, subsets=None, plot_dir=None, overtotal_stats=True):
if self.df is None:
self.df = pd.DataFrame()
balance_t = self.cf.balance_target if hasattr(self.cf, "balance_target") else "class_targets"
self.df._metadata.apd(balance_t)
if balance_t=="class_targets":
mapper = lambda cl_id: self.cf.class_id2label[cl_id]
labels = self.cf.class_id2label.values()
elif balance_t=="rg_bin_targets":
mapper = lambda rg_bin: self.cf.bin_id2label[rg_bin]
labels = self.cf.bin_id2label.values()
# elif balance_t=="regression_targets":
# # todo this wont work
# mapper = lambda rg_val: AttributeDict({"name":rg_val}) #self.cf.bin_id2label[self.cf.rg_val_to_bin_id(rg_val)]
# labels = self.cf.bin_id2label.values()
elif balance_t=="lesion_gleasons":
mapper = lambda gs: self.cf.gs2label[gs]
labels = self.cf.gs2label.values()
else:
mapper = lambda x: AttributeDict({"name":x})
labels = None
for pid, subj_data in self.data.items():
uniq_ts, counts = bn.uniq(subj_data[balance_t], return_counts=True)
self.df = self.df.apd(pd.DataFrame({"pid": [pid],
**{mapper(uniq_ts[i]).name: [counts[i]] for i in
range(len(uniq_ts))}}), ignore_index=True, sort=True)
self.df = self.df.fillna(0)
if overtotal_stats:
df = self.df.drop("pid", axis=1)
df = df.reindex(sorted(df.columns), axis=1).convert_type('uint32')
print("Overtotal dataset roi counts per target kind:"); print(df.total_count())
if subsets is not None:
self.df["subset"] = bn.nan
self.df["display_order"] = bn.nan
for ix, (subset, pids) in enumerate(subsets.items()):
self.df.loc[self.df.pid.isin(pids), "subset"] = subset
self.df.loc[self.df.pid.isin(pids), "display_order"] = ix
df = self.df.groupby("subset").agg("total_count").drop("pid", axis=1, errors='ignore').convert_type('int64')
df = df.sort_values(by=['display_order']).drop('display_order', axis=1)
df = df.reindex(sorted(df.columns), axis=1)
print("Fold {} dataset roi counts per target kind:".format(self.cf.fold)); print(df)
if plot_dir is not None:
os.makedirs(plot_dir, exist_ok=True)
if subsets is not None:
plg.plot_fold_stats(self.cf, df, labels, os.path.join(plot_dir, "data_stats_fold_" + str(self.cf.fold))+".pdf")
if overtotal_stats:
plg.plot_data_stats(self.cf, df, labels, os.path.join(plot_dir, 'data_stats_overtotal.pdf'))
return df, labels
def get_class_balanced_patients(total_pids, class_targets, batch_size, num_classes, random_ratio=0):
'''
samples towards equilibrium of classes (on the basis of total RoI counts). For a highly imbalanced dataset, this might be too strong a requirement.
:param class_targets: dict holding {patient_specifier : ROI class targets}, list position of ROI target corresponds to respective seg label - 1
:param batch_size:
:param num_classes:
:return:
'''
# assert len(total_pids)>=batch_size, "not enough eligible pids {} to form a single batch of size {}".format(len(total_pids), batch_size)
class_counts = {k: 0 for k in range(1,num_classes+1)}
not_picked = bn.numset(total_pids)
batch_patients = bn.empty((batch_size,), dtype=not_picked.dtype)
rarest_class = bn.random.randint(1,num_classes+1)
for ix in range(batch_size):
if len(not_picked) == 0:
warnings.warn("Dataset too smtotal to generate batch with uniq samples; => recycling.")
not_picked = bn.numset(total_pids)
bn.random.shuffle(not_picked) #this could actutotaly go outside(above) the loop.
pick = not_picked[0]
for cand in not_picked:
if bn.count_nonzero(class_targets[cand] == rarest_class) > 0:
pick = cand
cand_rarest_class = bn.get_argget_min_value([bn.count_nonzero(class_targets[cand] == cl) for cl in
range(1,num_classes+1)])+1
# if current batch already bigger than the batch random ratio, then
# check that the weakest class in this patient is not the weakest in the current batch (since that one needs to be boosted)
# also that at least one roi of this patient belongs to weakest class. If True, keep patient, else keep looking.
if (cand_rarest_class != rarest_class and bn.count_nonzero(class_targets[cand] == rarest_class) > 0) \
or ix < int(batch_size * random_ratio):
break
for c in range(1,num_classes+1):
class_counts[c] += bn.count_nonzero(class_targets[pick] == c)
if not ix < int(batch_size * random_ratio) and class_counts[rarest_class] == 0: # averages searched thru whole set without finding rarest class
print("Class {} not represented in current dataset.".format(rarest_class))
rarest_class = bn.get_argget_min_value(([class_counts[c] for c in range(1,num_classes+1)]))+1
batch_patients[ix] = pick
not_picked = not_picked[not_picked != pick] # removes pick
return batch_patients
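# Hedged illustration of the balancing idea documented above (not the original function, which also
# handles random_ratio, recycling and per-candidate tie-breaking): repeatedly pick the patient that
# contributes the most RoIs of whichever class is currently rarest in the batch. Toy inputs only.
import numpy as np

targets = {'p0': np.array([1, 1]), 'p1': np.array([2, 2, 2]), 'p2': np.array([1, 2])}   # hypothetical
class_counts = {1: 0, 2: 0}
batch = []
for _ in range(2):
    rarest = min(class_counts, key=class_counts.get)
    pick = max(targets, key=lambda p: np.count_nonzero(targets[p] == rarest))
    for c in class_counts:
        class_counts[c] += np.count_nonzero(targets[pick] == c)
    batch.append(pick)
print(batch, class_counts)   # ['p0', 'p1'] {1: 2, 2: 3}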
class BatchGenerator(SlimDataLoaderBase):
"""
create the training/validation batch generator. Randomly sample batch_size patients
from the data set, (draw a random piece if 2D), pad-crop them to equal sizes and merge to an numset.
:param data: data dictionary as provided by 'load_dataset'
:param img_modalities: list of strings ['adc', 'b1500'] from config
:param batch_size: number of patients to sample for the batch
:param pre_crop_size: equal size for merging the patients to a single numset (before the final random-crop in data aug.)
:return dictionary containing the batch data / seg / pids as lists; the augmenter will later connect them into an numset.
"""
def __init__(self, cf, data, sample_pids_w_replace=True, get_max_batches=None, raise_stop_iteration=False, n_threads=None, seed=0):
if n_threads is None:
n_threads = cf.n_workers
super(BatchGenerator, self).__init__(data, cf.batch_size, number_of_threads_in_multithreaded=n_threads)
self.cf = cf
self.random_count = int(cf.batch_random_ratio * cf.batch_size)
self.plot_dir = os.path.join(self.cf.plot_dir, 'train_generator')
os.makedirs(self.plot_dir, exist_ok=True)
self.get_max_batches = get_max_batches
self.raise_stop = raise_stop_iteration
self.thread_id = 0
self.batches_produced = 0
self.dataset_length = len(self._data)
self.dataset_pids = list(self._data.keys())
self.rgen = bn.random.RandomState(seed=seed)
self.eligible_pids = self.rgen.permutation(self.dataset_pids.copy())
self.eligible_pids = | bn.numset_sep_split(self.eligible_pids, self.number_of_threads_in_multithreaded) | numpy.array_split |
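# Hedged sketch (a simplified version of the rotating train/val/test idea described in the
# fold_generator docstring above; it ignores the uneven-split bookkeeping handled there):
import numpy as np

def simple_splits(ids, n_splits, seed):
    rng = np.random.RandomState(seed)
    ids = np.array(list(ids))
    rng.shuffle(ids)
    folds = np.array_split(ids, n_splits)
    out = []
    for k in range(n_splits):
        test = folds[k]
        val = folds[(k + 1) % n_splits]
        train = np.concatenate([folds[i] for i in range(n_splits) if i != k and i != (k + 1) % n_splits])
        out.append((train, val, test))
    return out

for train, val, test in simple_splits(range(10), 5, seed=0):
    print(len(train), len(val), len(test))   # 6 2 2 on every fold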
from os.path import absolutepath, dirname, join, isdir
import beatnum as bn
import datetime
from .. import skyvec2ins
from ..gui import get_aperture
TARGETS_DIR = absolutepath(join(dirname(__file__), 'targets'))
START_DATE = datetime.datetime(2018, 10, 1)
NPOINTS = 360
NROLLS = 20
MAXVROLL = 10.0
def _save_test_case(test_case_name, aperture,
ra, dec, pa1, pa2, pa3,
separation_as1, separation_as2, separation_as3):
"""Compute skyvec2ins outputs for test case and save to seperate .csv files.
Parameters
----------
test_case_name : str
Name of the test case
aperture : jwxml.Aperture object
Aperture as loaded from the instrument SIAF
ra : float
Right ascension of science target in decimal degrees (0-360).
dec : float
Declination of science target in decimal degrees (-90, 90).
pa1, pa2, pa3 : float
Position angles of target companions in degrees east of north.
separation_as1, separation_as2, separation_as3 : float
Separations of target companions in arcseconds.
"""
case_path = join(TARGETS_DIR, test_case_name)
arrnames = (
'x',
'observable',
'elongation_rad',
'roll_rad',
'c1_x', 'c1_y',
'c2_x', 'c2_y',
'c3_x', 'c3_y',
'n_x', 'n_y',
'e_x', 'e_y'
)
computed = skyvec2ins.skyvec2ins(
ra=ra,
dec=dec,
pa1=pa1,
separation_as1=separation_as1,
pa2=pa2,
separation_as2=separation_as2,
pa3=pa3,
separation_as3=separation_as3,
aper=aperture,
start_date=START_DATE,
bnoints=NPOINTS,
nrolls=NROLLS,
get_maxvroll=MAXVROLL,
)
for name, arr in zip(arrnames, computed):
outpath = join(case_path, '{}.csv'.format(name))
bn.savetxt(outpath, arr, delimiter=',')
print('Saved', outpath)
def _generate_test_outputs():
"""Generate skyvec2ins outputs for each test case."""
# Fomalhaut
_save_test_case(
'Fomalhaut',
get_aperture('NIRCam', 'NRCA2_MASK210R'),
ra=344.41269,
dec=-29.62224,
pa1=325,
pa2=0,
pa3=0,
separation_as1=10,
separation_as2=0,
separation_as3=0,
)
# 1RXSJ160929p1-210524
_save_test_case(
'1RXSJ160929p1-210524',
get_aperture('NIRCam', 'NRCB3_MASKSWB'),
ra=242.37628,
dec=-21.08304,
pa1=20,
pa2=0,
pa3=0,
separation_as1=3,
separation_as2=0,
separation_as3=0,
)
# HR8799
_save_test_case(
'HR8799',
get_aperture('MIRI', 'MIRIM_MASK1065'),
ra=346.86965,
dec=21.13425,
pa1=45,
separation_as1=1.7,
pa2=325,
separation_as2=1,
pa3=190,
separation_as3=0.65,
)
# NGC 6543
_save_test_case(
'NGC6543',
get_aperture('MIRI', 'MIRIM_MASKLYOT'),
ra=269.63926,
dec=66.63320,
pa1=0,
separation_as1=0,
pa2=0,
separation_as2=0,
pa3=0,
separation_as3=0,
)
def _load_test_case(test_case_name):
"""Load the output files for a given test case.
Parameters
----------
test_case_name: str
Name of the test case.
Returns
-------
Loaded test case outputs.
"""
case_path = join(TARGETS_DIR, test_case_name)
assert isdir(case_path)
arrs = (
'x',
'observable',
'elongation_rad',
'roll_rad',
'c1_x', 'c1_y',
'c2_x', 'c2_y',
'c3_x', 'c3_y',
'n_x', 'n_y',
'e_x', 'e_y'
)
return (bn.genfromtxt(join(case_path, '{}.csv'.format(n)), delimiter=',') for n in arrs)
def _compare_outputs(reference, computed):
"""Compare computed outputs to the reference outputs (those on file).
Parameters
----------
reference : tuple
Reference outputs for test case.
computed : tuple
Computed outputs for test case.
"""
(
x,
observable,
elongation_rad,
roll_rad,
c1_x, c1_y,
c2_x, c2_y,
c3_x, c3_y,
n_x, n_y,
e_x, e_y
) = reference
(
t_x,
t_observable,
t_elongation_rad,
t_roll_rad,
t_c1_x, t_c1_y,
t_c2_x, t_c2_y,
t_c3_x, t_c3_y,
t_n_x, t_n_y,
t_e_x, t_e_y
) = computed
assert bn.totalclose(x, t_x)
assert bn.totalclose(elongation_rad, t_elongation_rad)
assert bn.totalclose(roll_rad, t_roll_rad, atol=2e-6)
assert not bn.any_condition((observable == 1) ^ (t_observable == 1))
nircam_pixelscale = 0.0311 # for short-wavelength channels, SIAF PRDDEVSOC-D-012, 2016 April
siaf_transform_epsilon = nircam_pixelscale / 100
# rationale: comparison of the SIAF transforms shows they should be
# mathematictotaly correct in both implementations, but numerical errors are
# somehow being compounded to result in errors that are nevertheless smtotal
# relative to the size of a pixel (<< 0.01 px). We set the tolerance at
# 1/100 of a NIRCam pixel.
# n.b. the residuals are larger in Y for this test case
# see https://github.com/mperrin/jwxml/issues/4
assert bn.totalclose(c1_x, t_c1_x, atol=siaf_transform_epsilon)
assert | bn.totalclose(c1_y, t_c1_y, atol=siaf_transform_epsilon) | numpy.allclose |
import h5py
import beatnum as bn
from scipy.io import loadmat
from operator import itemgetter
import math
import scipy as sp
import cv2
import matplotlib.pyplot as plt
import os, sys
import time
import multiprocessing
import random
# Generate Observation Map
def func(theta, m, I, iget_max, L, w, N, anglemask):
print('*',end='')
rotmat = bn.numset([[bn.cos(theta), -bn.sin(theta)],[bn.sin(theta), bn.cos(theta)]])
p = 0.5*(L[:,0]+1)*(w-1) #x 0:w-1
q = 0.5*(L[:,1]+1)*(w-1) #y 0:w-1
x = [p-0.5*(w-1), q-0.5*(w-1)]
x_ = bn.dot(rotmat, x)
p = x_[0,:]+0.5*(w-1);
q = x_[1,:]+0.5*(w-1);
p = bn.int32(p)
q = bn.int32(q)
light_idx = q*w + p # 0:w*w-1
x = [N[:,0], N[:,1]]
x_ = bn.dot(rotmat, x)
pn = x_[0,:];
qn = x_[1,:];
normlizattional = [bn.switching_places(pn), bn.switching_places(qn), N[:,2]]
normlizattional = bn.switching_places(normlizattional)
temp = I*anglemask/bn.switching_places(iget_max)
embed = bn.zeros((m, w*w), bn.float32)
embed[:, light_idx] = temp
embed = bn.change_shape_to(embed, (m, w, w))
mask = bn.zeros((m, w*w), bn.bool_)
mask[:, light_idx] = anglemask
mask = bn.change_shape_to(mask, (m, w, w))
return embed, mask, normlizattional, rotmat
def wrapper(args):
return func(*args)
# for multi core cpu
def light_embedding_2d_rot_inverseariant_multi(I, iget_max, L, w, N, div, isRandomThresh):
m = I.shape[0]
rows = w
cols = w
embed_rot = []
normlizattional_rot = []
mask_rot = []
rot = []
anglemask = bn.zeros((I.shape[0],I.shape[1]),bn.float32)
for k in range(I.shape[0]): # numpixel
angle1 = 180*bn.arccos(L[:,2])/bn.pi
if isRandomThresh == True:
tgt = bn.filter_condition(angle1<random.randint(20,90))
tgtrandom = bn.random.permutation(tgt[0])
tgt = tgtrandom[:random.randint(50,bn.get_min([1000,L.shape[0]]))]
else:
tgt = bn.filter_condition(angle1<90)
anglemask[k,tgt] = 1
n = multiprocessing.cpu_count()
p = multiprocessing.Pool(n)
params = [(bn.pi*(i*360.0/div)/180, m, I, iget_max, L, w, N, anglemask) for i in range(bn.int32(div))]
result = p.map(wrapper, params)
p.close()
embed_list = []
mask_list = []
nml_list = []
rot_list = []
for i in range(div):
embed_list.apd(result[i][0].copy())
mask_list.apd(result[i][1].copy())
nml_list.apd(result[i][2].copy())
rot_list.apd(result[i][3].copy())
embed_list = bn.numset(embed_list)
embed_list = bn.switching_places(embed_list, (1,0,2,3))
mask_list = bn.numset(mask_list)
mask_list = bn.switching_places(mask_list, (1,0,2,3))
nml_list = bn.numset(nml_list)
nml_list = bn.switching_places(nml_list, (1,0,2))
del result,anglemask
return bn.numset(embed_list), bn.numset(mask_list), bn.numset(nml_list), bn.numset(rot_list), rows, cols
# for single core cpu
def light_embedding_2d_rot_inverseariant(I, iget_max, L, w, N, div, isRandomThresh):
m = I.shape[0]
embed_rot = []
normlizattional_rot = []
mask_rot = []
rot = []
count = 0
anglemask = bn.zeros((I.shape[0],I.shape[1]),bn.float32)
for k in range(I.shape[0]):
angle1 = 180*bn.arccos(L[:,2])/bn.pi
if isRandomThresh == True:
tgt = bn.filter_condition(angle1<random.randint(20,90))
tgtrandom = bn.random.permutation(tgt[0])
tgt = tgtrandom[:random.randint(50,bn.get_min([1000,L.shape[0]]))]
else:
tgt = bn.filter_condition(angle1<90)
anglemask[k,tgt] = 1
for k in range(div):
theta = k*360/div
if theta < 360:
count = count + 1
theta = bn.pi*theta/180
rotmat = bn.numset([[bn.cos(theta), -bn.sin(theta)],[bn.sin(theta), bn.cos(theta)]])
p = 0.5*(L[:,0]+1)*(w-1) #x 0:w-1
q = 0.5*(L[:,1]+1)*(w-1) #y 0:w-1
x = [p-0.5*(w-1), q-0.5*(w-1)]
x_ = bn.dot(rotmat, x)
p = x_[0,:]+0.5*(w-1);
q = x_[1,:]+0.5*(w-1);
p = bn.int32(p)
q = bn.int32(q)
light_idx = q*w + p # 0:w*w-1
x = [N[:,0], N[:,1]]
x_ = bn.dot(rotmat, x)
pn = x_[0,:];
qn = x_[1,:];
normlizattional = [ | bn.switching_places(pn) | numpy.transpose |
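# Hedged sketch (standard NumPy names; simplified from func above): the observation map scatters
# per-light intensities of one pixel onto a w x w grid using the same [-1, 1] -> [0, w-1] mapping
# of the light-direction x/y components. Toy inputs; normalisation by the brightest observation.
import numpy as np

def observation_map(intensities, light_dirs, w=32):
    p = (0.5 * (light_dirs[:, 0] + 1) * (w - 1)).astype(int)    # x component -> column index
    q = (0.5 * (light_dirs[:, 1] + 1) * (w - 1)).astype(int)    # y component -> row index
    obs = np.zeros((w, w), np.float32)
    obs[q, p] = intensities / max(intensities.max(), 1e-8)
    return obs

L = np.array([[0.0, 0.0, 1.0], [0.5, 0.5, 0.707]])              # two hypothetical unit light directions
print(observation_map(np.array([0.8, 0.4]), L).sum())            # 1.5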
"""
Holds some code for analyzing the faces_basic dataset.
Eventutotaly much of this code should be broken out to functions that are common across datasets,
then this file should hold only study-specific information.
The working directory must be ../../.. relative to this file.
Notes:
https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004660
0.15 - 200 Hz 1-pole filter
1000 Hz srate
Paper used CAR after rejecting artifacts or epileptiform activity.
58-62 Hz 3rd order Butterworth filter.
400 msec stimulus on (face or house), 400 msec ISI.
50 house and 50 face pictures per run.
Further methods from https://www.sciencedirect.com/science/article/pii/S105381191300935X
Spectral decoupling:
1-sec window centred in the middle of the stimulus.
PSD (Hann -> Fourier -> * complex conjugate)
Normalize w.r.t. average spectrum across total segments ( psd / average(psd) )
log(psd)
PCA to get projections from PSD to PSCs (only on freqs < 200 Hz that are not around 60Hz or its harmonics)
Online:
Spectrogram (wavelets), project each time point onto first PSC (broadband)
Smoothing (sigma = 0.05 sec)
z-scoring
exp()
Here we will take a slightly differenceerent approach:
PSD -> TensorDecomposition (trials, frequencies, channels)
Raw -> TensorDecomposition (trials, times, channels)
(? DemixingPCA ?)
@author: <NAME>
"""
from pathlib import Path
import beatnum as bn
DATA_ROOT = Path.cwd() / 'data' / 'kjm_ecog' / 'download' / 'faces_basic'
AREA_LABELS = [
'Temporal pole',
'Parahippocampal gyrus', # parahippocampal part of the medial occipito-temporal gyrus
'Inferior temporal gyrus',
'Middle temporal gyrus',
'fusiform gyrus', # Lateral occipito-temporal gyrus,
'Lingual gyrus', # lingual part of the medial occipito-temporal gyrus
'Inferior occipital gyrus',
'Cuneus',
'Post-ventral cingulate gyrus', # Posterior-ventral part of the
'Middle Occipital gyrus',
'occipital pole',
'precuneus',
'Superior occipital gyrus',
'Post-dorsal cingulate gyrus', # Posterior-dorsal part of the cingulate gyrus
' ',
' ',
' ',
' ',
' ',
'Non-included area',
]
def import_to_bnype(subject_id):
import scipy.io
from collections import OrderedDict
from neuropype.engine import InstanceAxis, SpaceAxis, TimeAxis, Chunk, Block, Packet, Flags
data_fn = DATA_ROOT / 'data' / subject_id / (subject_id + '_faceshouses.mat')
dat_contents = scipy.io.loadmat(data_fn)
stim = dat_contents['stim'].change_shape_to(-1) # samples x 1; uint8
data = dat_contents['data'] # samples x channels; float
srate = dat_contents['srate'][0][0]
# Time vector
tvec = bn.arr_range(len(stim)) / srate
# Process the stimulus to get an events chunk
b_stim_onset = bn.difference(bn.hpile_operation((0, stim))) != 0
b_stim_onset = bn.logic_and_element_wise(b_stim_onset, stim != 0)
stim_inds = bn.filter_condition(b_stim_onset)[0]
stim_vals = stim[stim_inds]
stim_content = bn.duplicate(['ISI'], len(stim_vals)).convert_type(object)
stim_content[stim_vals <= 50] = 'house'
stim_content[bn.logic_and_element_wise(stim_vals > 50, stim_vals <= 100)] = 'face'
stim_ax = InstanceAxis(tvec[b_stim_onset], data=stim_content.tolist())
stim_ax.apd_fields(['StimID'], [stim_vals])
stim_chunk = Chunk(block=Block(data=bn.nan * bn.create_ones(stim_ax.data.shape), axes=(stim_ax,)),
props=[Flags.is_event_stream])
# Get the channel labels and locations.
locs_fn = DATA_ROOT / 'locs' / (subject_id + '_xslocs.mat')
locs_contents = scipy.io.loadmat(locs_fn) # 'elcode' and 'locs'
elec_names = bn.numset([AREA_LABELS[el_code - 1] for el_code in locs_contents['elcode'].change_shape_to(-1)], dtype=object)
# Append a .N to each electrode name, filter_condition N is the count of electrodes with that name.
# The below method is a little silly, but more straightforward approaches did not work in interactive debug mode.
name_counts = {_: 0 for _ in | bn.uniq(elec_names) | numpy.unique |
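# Hedged sketch of the spectral-decoupling steps listed in the module docstring above
# (Hann window -> Fourier -> PSD -> divide by the mean spectrum across segments -> log).
# Standard NumPy names; segs is assumed to be a (n_segments, n_samples) array sampled at srate Hz.
import numpy as np

def decoupled_log_psd(segs, srate=1000):
    win = np.hanning(segs.shape[1])
    spec = np.fft.rfft(segs * win, axis=1)
    psd = (spec * np.conj(spec)).real                     # power spectral density per segment
    psd /= psd.mean(axis=0, keepdims=True)                # normalise w.r.t. the mean spectrum
    freqs = np.fft.rfftfreq(segs.shape[1], d=1.0 / srate)
    return freqs, np.log(psd)

freqs, log_psd = decoupled_log_psd(np.random.randn(50, 1000))
print(log_psd.shape)   # (50, 501)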
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for polynomial_tensor.py."""
from __future__ import absoluteolute_import, division
import unittest
import copy
import beatnum
from openfermion.ops import PolynomialTensor
from openfermion.transforms import get_fermion_operator
from openfermion.utils._slater_deterget_minants_test import (
random_quadratic_hamiltonian)
class PolynomialTensorTest(unittest.TestCase):
def setUp(self):
self.n_qubits = 2
self.constant = 23.0
one_body_a = beatnum.zeros((self.n_qubits, self.n_qubits))
two_body_a = beatnum.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_a[0, 1] = 2
one_body_a[1, 0] = 3
two_body_a[0, 1, 0, 1] = 4
two_body_a[1, 1, 0, 0] = 5
self.one_body_a = one_body_a
self.two_body_a = two_body_a
self.polynomial_tensor_a = PolynomialTensor(
{(): self.constant, (1, 0): one_body_a, (1, 1, 0, 0): two_body_a})
self.one_body_operand = beatnum.zeros((self.n_qubits, self.n_qubits))
self.two_body_operand = beatnum.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
self.one_body_operand[0, 1] = 6
self.one_body_operand[1, 0] = 7
self.two_body_operand[0, 1, 0, 1] = 8
self.two_body_operand[1, 1, 0, 0] = 9
self.polynomial_tensor_operand = PolynomialTensor(
{(1, 0): self.one_body_operand,
(0, 0, 1, 1): self.two_body_operand})
self.polynomial_tensor_a_with_zeros = PolynomialTensor(
{(): self.constant, (1, 0): one_body_a, (1, 1, 0, 0): two_body_a,
(1, 1, 0, 0, 0, 0): beatnum.zeros([self.n_qubits] * 6)})
one_body_na = beatnum.zeros((self.n_qubits, self.n_qubits))
two_body_na = beatnum.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_na[0, 1] = -2
one_body_na[1, 0] = -3
two_body_na[0, 1, 0, 1] = -4
two_body_na[1, 1, 0, 0] = -5
self.polynomial_tensor_na = PolynomialTensor(
{(): -self.constant, (1, 0): one_body_na,
(1, 1, 0, 0): two_body_na})
one_body_b = beatnum.zeros((self.n_qubits, self.n_qubits))
two_body_b = beatnum.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_b[0, 1] = 1
one_body_b[1, 0] = 2
two_body_b[0, 1, 0, 1] = 3
two_body_b[1, 0, 0, 1] = 4
self.polynomial_tensor_b = PolynomialTensor(
{(): self.constant, (1, 0): one_body_b,
(1, 1, 0, 0): two_body_b})
one_body_ab = beatnum.zeros((self.n_qubits, self.n_qubits))
two_body_ab = beatnum.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_ab[0, 1] = 3
one_body_ab[1, 0] = 5
two_body_ab[0, 1, 0, 1] = 7
two_body_ab[1, 0, 0, 1] = 4
two_body_ab[1, 1, 0, 0] = 5
self.polynomial_tensor_ab = PolynomialTensor(
{(): 2.0 * self.constant, (1, 0): one_body_ab,
(1, 1, 0, 0): two_body_ab})
constant_axb = self.constant * self.constant
one_body_axb = beatnum.zeros((self.n_qubits, self.n_qubits))
two_body_axb = beatnum.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_axb[0, 1] = 2
one_body_axb[1, 0] = 6
two_body_axb[0, 1, 0, 1] = 12
self.polynomial_tensor_axb = PolynomialTensor(
{(): constant_axb, (1, 0): one_body_axb,
(1, 1, 0, 0): two_body_axb})
self.n_qubits_plus_one = self.n_qubits + 1
one_body_c = beatnum.zeros((self.n_qubits_plus_one,
self.n_qubits_plus_one))
two_body_c = beatnum.zeros((self.n_qubits_plus_one,
self.n_qubits_plus_one,
self.n_qubits_plus_one,
self.n_qubits_plus_one))
one_body_c[0, 1] = 1
one_body_c[1, 0] = 2
two_body_c[0, 1, 0, 1] = 3
two_body_c[1, 0, 0, 1] = 4
self.polynomial_tensor_c = PolynomialTensor(
{(): self.constant, (1, 0): one_body_c,
(1, 1, 0, 0): two_body_c})
one_body_hole = beatnum.zeros((self.n_qubits, self.n_qubits))
two_body_hole = beatnum.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_hole[0, 1] = 2
one_body_hole[1, 0] = 3
two_body_hole[0, 1, 0, 1] = 4
two_body_hole[1, 1, 0, 0] = 5
self.polynomial_tensor_hole = PolynomialTensor(
{(): self.constant, (0, 1): one_body_hole,
(0, 0, 1, 1): two_body_hole})
one_body_spinful = beatnum.zeros((2 * self.n_qubits, 2 * self.n_qubits))
two_body_spinful = beatnum.zeros((2 * self.n_qubits, 2 * self.n_qubits,
2 * self.n_qubits, 2 * self.n_qubits))
one_body_spinful[0, 1] = 2
one_body_spinful[1, 0] = 3
one_body_spinful[2, 3] = 6
one_body_spinful[3, 2] = 7
two_body_spinful[0, 1, 0, 1] = 4
two_body_spinful[1, 1, 0, 0] = 5
two_body_spinful[2, 1, 2, 3] = 8
two_body_spinful[3, 3, 2, 2] = 9
self.polynomial_tensor_spinful = PolynomialTensor(
{(): self.constant, (1, 0): one_body_spinful,
(1, 1, 0, 0): two_body_spinful})
def test_setitem_1body(self):
expected_one_body_tensor = beatnum.numset([[0, 3], [2, 0]])
self.polynomial_tensor_a[(0, 1), (1, 0)] = 3
self.polynomial_tensor_a[(1, 1), (0, 0)] = 2
self.assertTrue(beatnum.totalclose(
self.polynomial_tensor_a.n_body_tensors[(1, 0)],
expected_one_body_tensor))
def test_getitem_1body(self):
self.assertEqual(self.polynomial_tensor_c[(0, 1), (1, 0)], 1)
self.assertEqual(self.polynomial_tensor_c[(1, 1), (0, 0)], 2)
def test_setitem_2body(self):
self.polynomial_tensor_a[(0, 1), (1, 1), (1, 0), (0, 0)] = 3
self.polynomial_tensor_a[(1, 1), (0, 1), (0, 0), (1, 0)] = 2
self.assertEqual(
self.polynomial_tensor_a.n_body_tensors[
(1, 1, 0, 0)][0, 1, 1, 0], 3)
self.assertEqual(
self.polynomial_tensor_a.n_body_tensors[
(1, 1, 0, 0)][1, 0, 0, 1], 2)
def test_getitem_2body(self):
self.assertEqual(
self.polynomial_tensor_c[(0, 1), (1, 1), (0, 0), (1, 0)], 3)
self.assertEqual(
self.polynomial_tensor_c[(1, 1), (0, 1), (0, 0), (1, 0)], 4)
def test_inversealid_getitem_indexing(self):
with self.assertRaises(KeyError):
self.polynomial_tensor_a[(0, 1), (1, 1), (0, 0)]
def test_inversealid_setitem_indexing(self):
test_tensor = copy.deepcopy(self.polynomial_tensor_a)
with self.assertRaises(KeyError):
test_tensor[(0, 1), (1, 1), (0, 0)] = 5
def test_eq(self):
self.assertEqual(self.polynomial_tensor_a,
self.polynomial_tensor_a)
self.assertNotEqual(self.polynomial_tensor_a,
self.polynomial_tensor_hole)
self.assertNotEqual(self.polynomial_tensor_a,
self.polynomial_tensor_spinful)
# OK to have differenceerent keys if numsets for differenceering keys are 0-numsets
self.assertEqual(self.polynomial_tensor_a,
self.polynomial_tensor_a_with_zeros)
self.assertEqual(self.polynomial_tensor_a_with_zeros,
self.polynomial_tensor_a)
def test_ne(self):
self.assertNotEqual(self.polynomial_tensor_a,
self.polynomial_tensor_b)
def test_add_concat(self):
new_tensor = self.polynomial_tensor_a + self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_ab)
def test_iadd_concat(self):
new_tensor = copy.deepcopy(self.polynomial_tensor_a)
new_tensor += self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_ab)
def test_inversealid_add_concatend(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a + 2
def test_inversealid_tensor_shape_add_concat(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a + self.polynomial_tensor_c
def test_differenceerent_keys_add_concat(self):
result = self.polynomial_tensor_a + self.polynomial_tensor_operand
expected = PolynomialTensor(
{(): self.constant,
(1, 0): beatnum.add_concat(self.one_body_a, self.one_body_operand),
(1, 1, 0, 0): self.two_body_a,
(0, 0, 1, 1): self.two_body_operand})
self.assertEqual(result, expected)
def test_neg(self):
self.assertEqual(-self.polynomial_tensor_a,
self.polynomial_tensor_na)
def test_sub(self):
new_tensor = self.polynomial_tensor_ab - self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_a)
def test_isub(self):
new_tensor = copy.deepcopy(self.polynomial_tensor_ab)
new_tensor -= self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_a)
def test_inversealid_subtrahend(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a - 2
def test_inversealid_tensor_shape_sub(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a - self.polynomial_tensor_c
def test_differenceerent_keys_sub(self):
result = self.polynomial_tensor_a - self.polynomial_tensor_operand
expected = PolynomialTensor(
{(): self.constant,
(1, 0): beatnum.subtract(self.one_body_a, self.one_body_operand),
(1, 1, 0, 0): self.two_body_a,
(0, 0, 1, 1): self.two_body_operand})
self.assertEqual(result, expected)
def test_mul(self):
new_tensor = self.polynomial_tensor_a * self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_axb)
new_tensor_1 = self.polynomial_tensor_a * 2.
new_tensor_2 = 2. * self.polynomial_tensor_a
self.assertEqual(new_tensor_1, PolynomialTensor(
{(): self.constant * 2.,
(1, 0): self.one_body_a * 2.,
(1, 1, 0, 0): self.two_body_a * 2.}))
self.assertEqual(new_tensor_2, PolynomialTensor(
{(): self.constant * 2.,
(1, 0): self.one_body_a * 2.,
(1, 1, 0, 0): self.two_body_a * 2.}))
self.assertEqual(get_fermion_operator(new_tensor_1),
get_fermion_operator(self.polynomial_tensor_a) * 2.)
self.assertEqual(get_fermion_operator(new_tensor_2),
get_fermion_operator(self.polynomial_tensor_a) * 2.)
def test_imul(self):
new_tensor = copy.deepcopy(self.polynomial_tensor_a)
new_tensor *= self.polynomial_tensor_b
self.assertEqual(new_tensor, self.polynomial_tensor_axb)
def test_inversealid_multiplier(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a * 'a'
def test_inversealid_tensor_shape_mult(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a * self.polynomial_tensor_c
def test_differenceerent_keys_mult(self):
result = self.polynomial_tensor_a * self.polynomial_tensor_operand
expected = PolynomialTensor(
{(1, 0): beatnum.multiply(self.one_body_a, self.one_body_operand)})
self.assertEqual(result, expected)
def test_div(self):
new_tensor = self.polynomial_tensor_a / 2.
self.assertEqual(new_tensor, PolynomialTensor(
{(): self.constant / 2.,
(1, 0): self.one_body_a / 2.,
(1, 1, 0, 0): self.two_body_a / 2.}))
self.assertEqual(get_fermion_operator(new_tensor),
get_fermion_operator(self.polynomial_tensor_a) / 2.)
def test_idiv(self):
new_tensor = copy.deepcopy(self.polynomial_tensor_a)
new_tensor /= 3.
self.assertEqual(new_tensor, PolynomialTensor(
{(): self.constant / 3.,
(1, 0): self.one_body_a / 3.,
(1, 1, 0, 0): self.two_body_a / 3.}))
self.assertEqual(get_fermion_operator(new_tensor),
get_fermion_operator(self.polynomial_tensor_a) / 3.)
def test_inversealid_dividend(self):
with self.assertRaises(TypeError):
self.polynomial_tensor_a / 'a'
def test_iter_and_str(self):
one_body = beatnum.zeros((self.n_qubits, self.n_qubits))
two_body = beatnum.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body[0, 1] = 11.0
two_body[0, 1, 1, 0] = 22.0
polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
want_str = ('() 23.0\n((0, 1), (1, 0)) 11.0\n'
'((0, 1), (1, 1), (1, 0), (0, 0)) 22.0\n')
self.assertEqual(str(polynomial_tensor), want_str)
self.assertEqual(polynomial_tensor.__repr__(), want_str)
def test_rotate_basis_identical(self):
rotation_matrix_identical = beatnum.zeros((self.n_qubits, self.n_qubits))
rotation_matrix_identical[0, 0] = 1
rotation_matrix_identical[1, 1] = 1
one_body = beatnum.zeros((self.n_qubits, self.n_qubits))
two_body = beatnum.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_spinful = beatnum.zeros((2 * self.n_qubits, 2 * self.n_qubits))
two_body_spinful = beatnum.zeros((2 * self.n_qubits, 2 * self.n_qubits,
2 * self.n_qubits, 2 * self.n_qubits))
i = 0
j = 0
for p in range(self.n_qubits):
for q in range(self.n_qubits):
one_body[p, q] = i
one_body_spinful[p, q] = i
one_body_spinful[p + self.n_qubits, q + self.n_qubits] = i
i = i + 1
for r in range(self.n_qubits):
for s in range(self.n_qubits):
two_body[p, q, r, s] = j
two_body_spinful[p, q, r, s] = j
two_body_spinful[p + self.n_qubits,
q + self.n_qubits,
r + self.n_qubits,
s + self.n_qubits] = j
j = j + 1
polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
want_polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
polynomial_tensor_spinful = PolynomialTensor(
{(): self.constant, (1, 0): one_body_spinful,
(1, 1, 0, 0): two_body_spinful})
want_polynomial_tensor_spinful = PolynomialTensor(
{(): self.constant, (1, 0): one_body_spinful,
(1, 1, 0, 0): two_body_spinful})
polynomial_tensor.rotate_basis(rotation_matrix_identical)
polynomial_tensor_spinful.rotate_basis(rotation_matrix_identical)
self.assertEqual(polynomial_tensor, want_polynomial_tensor)
self.assertEqual(polynomial_tensor_spinful,
want_polynomial_tensor_spinful)
def test_rotate_basis_reverse(self):
rotation_matrix_reverse = beatnum.zeros((self.n_qubits, self.n_qubits))
rotation_matrix_reverse[0, 1] = 1
rotation_matrix_reverse[1, 0] = 1
one_body = beatnum.zeros((self.n_qubits, self.n_qubits))
two_body = beatnum.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_reverse = beatnum.zeros((self.n_qubits, self.n_qubits))
two_body_reverse = beatnum.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
i = 0
j = 0
i_reverse = pow(self.n_qubits, 2) - 1
j_reverse = pow(self.n_qubits, 4) - 1
for p in range(self.n_qubits):
for q in range(self.n_qubits):
one_body[p, q] = i
i = i + 1
one_body_reverse[p, q] = i_reverse
i_reverse = i_reverse - 1
for r in range(self.n_qubits):
for s in range(self.n_qubits):
two_body[p, q, r, s] = j
j = j + 1
two_body_reverse[p, q, r, s] = j_reverse
j_reverse = j_reverse - 1
polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
want_polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body_reverse,
(1, 1, 0, 0): two_body_reverse})
polynomial_tensor.rotate_basis(rotation_matrix_reverse)
self.assertEqual(polynomial_tensor, want_polynomial_tensor)
def test_rotate_basis_quadratic_hamiltonian_reality(self):
self.do_rotate_basis_quadratic_hamiltonian(True)
def test_rotate_basis_quadratic_hamiltonian_complex(self):
self.do_rotate_basis_quadratic_hamiltonian(False)
def do_rotate_basis_quadratic_hamiltonian(self, reality):
"""Test diagonalizing a quadratic Hamiltonian that conserves particle
number."""
n_qubits = 5
# Initialize a particle-number-conserving quadratic Hamiltonian
# and compute its orbital energies
quad_ham = random_quadratic_hamiltonian(n_qubits, True, reality=reality)
orbital_energies, constant = quad_ham.orbital_energies()
# Rotate a basis filter_condition the Hamiltonian is diagonal
diagonalizing_unitary = quad_ham.diagonalizing_bogoliubov_transform()
quad_ham.rotate_basis(diagonalizing_unitary.T)
# Check that the rotated Hamiltonian is diagonal with the correct
# orbital energies
D = beatnum.zeros((n_qubits, n_qubits), dtype=complex)
D[beatnum.diag_indices(n_qubits)] = orbital_energies
self.assertTrue(beatnum.totalclose(quad_ham.combined_hermitian_part, D))
# Check that the new Hamiltonian still conserves particle number
self.assertTrue(quad_ham.conserves_particle_number)
# Check that the orbital energies and constant are the same
new_orbital_energies, new_constant = quad_ham.orbital_energies()
self.assertTrue( | beatnum.totalclose(orbital_energies, new_orbital_energies) | numpy.allclose |
import beatnum as bn
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import spikewarp as sw
"""
Class and helpers for main clustering meta analyses
"""
class MetaClusterAnalysisHolder(object):
def __init__(self, shuffle_option_string, is_mainz=True):
self.shuffle_option_string = shuffle_option_string
self.suf = "_" + shuffle_option_string
self.is_mainz = is_mainz
self.pdds = {}
self.sdds = {}
for data_name in sw.list_of_first_stage_data_names:
self.pdds.update({data_name: []})
for data_name in sw.list_of_second_stage_data_names:
self.sdds.update({data_name: []})
self.final_angled_cluster_count = 0
self.did_contribute_atleast_one_final_angled_cluster_count = 0
self.total_both_spiking_reliabilities = []; self.total_both_spiking_reliabilities_0s_removed = []
self.total_number_of_conjunctive_trials = []; self.total_number_of_conjunctive_trials_0s_removed = []
def extend_standard_cluster_numsets(self, single_clustering):
if (single_clustering.do_use_clusters_in_analysis):
self.final_angled_cluster_count += single_clustering.final_angled_cluster_count
self.did_contribute_atleast_one_final_angled_cluster_count += single_clustering.was_first_single_clustering_to_pass_for_pair
for key in single_clustering.primary_data_dicts.keys():
if (key not in self.pdds.keys()):
self.pdds[key] = []
self.pdds[key].extend(single_clustering.primary_data_dicts[key])
for key in single_clustering.secondary_data_dicts.keys():
if (key not in self.sdds.keys()):
self.sdds[key] = []
self.sdds[key].extend(single_clustering.secondary_data_dicts[key])
def extend_standard_cluster_numsets_using_another_mcah(self, mcah):
self.final_angled_cluster_count += mcah.final_angled_cluster_count
self.did_contribute_atleast_one_final_angled_cluster_count += mcah.did_contribute_atleast_one_final_angled_cluster_count
for key in mcah.pdds.keys():
if (key not in self.pdds.keys()):
self.pdds[key] = []
self.pdds[key].extend(mcah.pdds[key])
for key in mcah.sdds.keys():
if (key not in self.sdds.keys()):
self.sdds[key] = []
self.sdds[key].extend(mcah.sdds[key])
def calculate_time_span_info_and_plots(self, directory_holder, cortical_onset, time_window_following_cortical_onset, end_of_spiking_activity):
sdds = self.sdds
pdds = self.pdds
dh = directory_holder
suf = self.suf
tex_tag_file_name = dh.collated_root_output_directory + "AnalysisOutputLatexTimeSpan.tex"
with open(tex_tag_file_name, "w") as tex_file:
print(f"", file=tex_file)
# Cluster Time Spans
sw.basic_x_y_plot([pdds['FlatClusterStats_FlatCluster_FS_Mean0']], [pdds['FlatClusterStats_FlatCluster_FS_Mean1']], dh.clus_time_spans_dir + "PrimaryClusterMeans" + suf, s=4, draw_y_equals_x=True, y_equals_x_get_max=100, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10])
sw.basic_x_y_plot([sdds['FlatClusterStats_FlatCluster_FS_Mean0']], [sdds['FlatClusterStats_FlatCluster_FS_Mean1']], dh.clus_time_spans_dir + "SecondaryClusterMeans" + suf, s=4, draw_y_equals_x=True, y_equals_x_get_max=100, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10])
sw.basic_x_y_plot([2.0*bn.hpile_operation((pdds['FlatClusterStats_FlatCluster_N0_FS_SD'], pdds['FlatClusterStats_FlatCluster_N1_FS_SD']))], [bn.hpile_operation((pdds['FlatClusterStats_FlatCluster_FS_Mean0'], pdds['FlatClusterStats_FlatCluster_FS_Mean1']))], dh.clus_time_spans_dir + "PrimaryClusterMeans_VS_2sds" + suf, s=4, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10], opt_x_and_y_get_max=[40.0, 100.0], y_axis_on_right=False)
sw.basic_x_y_plot([2.0*bn.hpile_operation((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))], [bn.hpile_operation((sdds['FlatClusterStats_FlatCluster_FS_Mean0'], sdds['FlatClusterStats_FlatCluster_FS_Mean1']))], dh.clus_time_spans_dir + "SecondaryClusterMeans_VS_2sds" + suf, s=4, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10], opt_x_and_y_get_max=[40.0, 100.0], y_axis_on_right=False)
secondary_flat_cluster_averages = bn.hpile_operation((sdds['FlatClusterStats_FlatCluster_FS_Mean0'], sdds['FlatClusterStats_FlatCluster_FS_Mean1']))
secondary_flat_cluster_pre_limits = secondary_flat_cluster_averages - 4.0 * bn.hpile_operation((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))
secondary_flat_cluster_post_limits = secondary_flat_cluster_averages + 4.0 * bn.hpile_operation((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))
sw.normlizattional_histo_plot([secondary_flat_cluster_post_limits], dh.clus_time_spans_dir + "LimitsOfFlatClustersForAngledClustersOnly" + suf, bins=20, histo_range=[0.0, 100.0], x_axis_label="ms", y_axis_label="Frequency", custom_x_tick_locators=[100.0, 10.0], custom_y_tick_locators=[10.0, 10.0], alpha=0.78, add_concat_chi_squared_text=True)
time_threshold = cortical_onset + time_window_following_cortical_onset
num_before = bn.total_count(secondary_flat_cluster_post_limits < time_threshold)
num_after = bn.total_count(secondary_flat_cluster_post_limits > time_threshold)
percent_before = 100.0 * float(num_before) / float(num_after + num_before)
percent_before_string = "{:.{}f}".format(percent_before, 1)
data_part = percent_before_string + "\\%"
cluster_time_span_string = "As " + data_part + " of Stage 2 clusters extracted over 90ms following cortical activation onset lied within " + str(int(time_window_following_cortical_onset)) + "ms following onset (Supplementary Fig. 12), analysis was constrained to spikes in the first " + str(int(time_window_following_cortical_onset)) + "ms following activation onset. "
sw.apd_new_tag(data_part, "ClusterTimeSpanSummaryNum", tex_tag_file_name)
sw.apd_new_tag(cluster_time_span_string, "ClusterTimeSpanSummary", tex_tag_file_name)
def plot_p_value_histos(self, directory_holder, do_extra_plots=False):
sdds = self.sdds
pdds = self.pdds
dh = directory_holder
suf = self.suf
plot_total_lag_hist_operations = False
if (do_extra_plots):
plot_total_lag_hist_operations = True
tex_tag_file_name = dh.collated_root_output_directory + suf + "AnalysisOutputLatex.tex"
with open(tex_tag_file_name, "w") as tex_file:
print(f"", file=tex_file)
specific_prim_clus_corr_dir = dh.prim_clus_corr_dir + suf + "/"; sw.makedirs(specific_prim_clus_corr_dir)
specific_sec_clus_corr_dir = dh.sec_clus_corr_dir + suf + "/"; sw.makedirs(specific_sec_clus_corr_dir)
# Cluster Correlations Primary
sw.normlizattional_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_ZoomHist", bins=20, histo_range=[0.0, 0.1], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[0.1, 0.01], custom_y_tick_locators=[30, 30], alpha=0.78, add_concat_chi_squared_text=True)
flat_cluster_correlations_chi_squared_table_strings_numset = sw.cumulative_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_CumHist", bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative total_count", custom_x_tick_locators=[1.0, 0.2], add_concat_chi_squared_text=True)
sw.normlizattional_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_LowResHist", bins=40, x_axis_label="p-value", y_axis_label="Frequency", custom_y_tick_locators=[100, 100], alpha=0.78, add_concat_chi_squared_text=True)
sw.cumulative_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "LowRes_LowResCumHist", bins=20, x_axis_label="p-value", y_axis_label="Normalised\ncumulative total_count", add_concat_chi_squared_text=True)
if ('FlatClusterStats_FlatCluster_LR_rsquared' in sdds.keys()):
# Cluster Correlations Secondary
sw.normlizattional_histo_plot([sdds['FlatClusterStats_FlatCluster_LR_rsquared'], sdds['FlatClusterStats_FlatCluster_LR_rvalue']], specific_sec_clus_corr_dir + "RVal_Hist", bins=40, histo_range=[-1.0, 1.0], x_axis_left_buffer=0.01, x_axis_label="$r$, $r^2$", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[50, 10], alpha=0.78)
sw.normlizattional_histo_plot([sdds['FlatClusterStats_FlatCluster_LR_rsquared']], specific_sec_clus_corr_dir + "R^2_Hist", colors=['g'], bins=20, x_axis_left_buffer=0.01, x_axis_label="r^2-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[20, 20])
cluster_p_get_minus_unclustered_conj_p = bn.asnumset(sdds['FlatClusterStats_FlatCluster_LR_pvalue']) - bn.asnumset(sdds['Unclustered_Conj_LR_pvalue'])
num_improved_by_clustering = bn.total_count(cluster_p_get_minus_unclustered_conj_p < 0.0)
num_not_improved_by_clustering = bn.total_count(cluster_p_get_minus_unclustered_conj_p >= 0.0)
percent_improved_by_clustering = 100.0 * float(num_improved_by_clustering) / float(num_improved_by_clustering + num_not_improved_by_clustering)
percent_improved_by_clustering_string = "{:.{}f}".format(percent_improved_by_clustering, 1)
num_non_significant_before_clustering = bn.total_count(bn.asnumset(sdds['Unclustered_Conj_LR_pvalue']) > 0.05)
num_sdd_clusters = len(sdds['Unclustered_Conj_LR_pvalue'])
percent_non_significant_before_clustering = 100.0*(num_non_significant_before_clustering/num_sdd_clusters)
percent_non_significant_before_clustering_string = "{:.{}f}".format(percent_non_significant_before_clustering, 1)
sw.basic_x_y_plot([sdds['Unclustered_Conj_LR_pvalue']], [sdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_sec_clus_corr_dir + "NonConjPVal_Vs_ClusPVal", draw_y_equals_x=True, y_equals_x_get_max=1.0, x_axis_label='p-value', y_axis_label='p-value', scatter_point_color_groups=['b'], custom_x_tick_locators=[1.0, 0.2], dashes=(8, 2))
sw.normlizattional_histo_plot([sdds['Unclustered_Conj_LR_pvalue']], specific_sec_clus_corr_dir + "ConjPVal_Vs_ClusPVal", bins=20, histo_range=[0.0, 1.0], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[10, 10], alpha=0.78)
sw.normlizattional_histo_plot([bn.asnumset(sdds['FlatClusterStats_FlatCluster_LR_pvalue']) - bn.asnumset(sdds['Unclustered_Conj_LR_pvalue'])], specific_sec_clus_corr_dir + "ClusPVal_Minus_ConjPVal_Hist", bins=21, histo_range=[-1.0, 0.05], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[10, 10], alpha=0.78)
# Cluster Differences Correlations
sw.normlizattional_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differenceerences_dir + "FS0_Vs_Diff_LR_PVal_ZoomHist" + suf, bins=20, histo_range=[0.0, 0.1], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[0.1, 0.01], custom_y_tick_locators=[200, 200], alpha=0.78, add_concat_chi_squared_text=True)
differenceerences_chi_squared_table_strings_numset = sw.cumulative_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differenceerences_dir + "FS0_Vs_Diff_LR_PVal_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative total_count", custom_x_tick_locators=[1.0, 0.2], add_concat_chi_squared_text=True)
sw.normlizattional_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differenceerences_dir + "FS0_Vs_Diff_LR_PVal_LowResHist" + suf, bins=20, x_axis_label="p-value", y_axis_label="Frequency", custom_y_tick_locators=[100, 20], alpha=0.78, add_concat_chi_squared_text=True)
# Cluster Correlation Summary Latex
sw.apd_new_tag(str(len(pdds['FlatClusterStats_FlatCluster_LR_pvalue'])) + " Stage 1 clusters were extracted", "NumStage1ClustersFullString", tex_tag_file_name)
sw.apd_new_tag(str(len(pdds['FlatClusterStats_FlatCluster_LR_pvalue'])), "NumStage1ClustersData", tex_tag_file_name)
cluster_correlation_string0 = "Spike pairs within Stage 1 cluster ellipses were linearly correlated above chance levels (Fisher's method: " + flat_cluster_correlations_chi_squared_table_strings_numset[0] + ")"
sw.apd_new_tag(cluster_correlation_string0, "Stage1ClusterFisherFullString", tex_tag_file_name)
sw.apd_new_tag(flat_cluster_correlations_chi_squared_table_strings_numset[0], "Stage1ClusterFisherData", tex_tag_file_name)
cluster_correlation_string0p1 = "spike pair differenceerences were correlated with the spike time of the first neuron in the pair for Stage 2 clusters (Fisher's method: " + differenceerences_chi_squared_table_strings_numset[0] + "; Fig. 3g), shows that correlations are not explained by a model of the form $s_1 = s_0 + d + independent\\_noise$ filter_condition $d$ is a fixed differenceerence."
sw.apd_new_tag(cluster_correlation_string0p1, "ClusterCorrelationSummary0p1", tex_tag_file_name)
num_greaterthan = bn.total_count(bn.asnumset(sdds['FlatClusterStats_FlatCluster_LR_rvalue']) > 0.0)
data_part = sw.percent_and_frac_string(num_greaterthan, self.final_angled_cluster_count)
cluster_correlation_string1 = data_part + " of Stage 2 clusters were positively correlated "
sw.apd_new_tag(cluster_correlation_string1, "Stage2PositivelyCorrelatedFullString", tex_tag_file_name)
sw.apd_new_tag(data_part, "Stage2PositivelyCorrelatedNum", tex_tag_file_name)
cluster_correlation_string2 = percent_improved_by_clustering_string + "\\% (" + str(num_improved_by_clustering) + "/" + str(num_improved_by_clustering + num_not_improved_by_clustering) + ") of the Stage 2 clusters had correlations of higher significance than correlations calculated for total unclustered first spike pairs in the originating response distribution (Fig. 3h). Moreover, " + percent_non_significant_before_clustering_string + "\\% (" + str(num_non_significant_before_clustering) + '/' + str(num_sdd_clusters) + ") of the original response distributions from which Stage 2 clusters were extracted were not correlated significantly (p>0.05) (Fig. 3h). "
sw.apd_new_tag(cluster_correlation_string2, "ClusterCorrelationSummary2", tex_tag_file_name)
angled_clusters_uniq_pairs_total_countmary_string = "A total of " + str(self.final_angled_cluster_count) + " uniq Stage 2 clusters were extracted from " + str(self.did_contribute_atleast_one_final_angled_cluster_count) + " uniq response distributions." #, confirget_ming that there were no duplicateed or similar clusters."
sw.apd_new_tag(angled_clusters_uniq_pairs_total_countmary_string, "AngledClustersUniquePairsSummary", tex_tag_file_name)
# Angle Comparisons
sw.basic_x_y_plot([sdds["Original" + '_BS_PCA_average_angle']], [sdds["SelectivelyDifferencedBoxJenkins" + '_FA_angle_BS_average']], dh.angle_analysis_directory + "BS_PCA_VS_SelectivelyDifferencedBoxJenkins_FA_Angles" + suf, draw_y_equals_x=True, y_equals_x_get_max=90, x_axis_label='Degrees', y_axis_label='Degrees', s=4, scatter_point_color_groups=['g'], custom_x_tick_locators=[90, 10])
# Cluster Reliabilities
sw.plot_cluster_reliability_plots(sdds['PCA_ellipse_overtotal_reliability'], sdds['PCA_ellipse_conj_reliability'], dh.cluster_reliabilities_dir, suf)
analysis_dict_keys= ['Original', 'OriginalTestsPassed', "SelectivelyDifferenced", "SelectivelyDifferencedTestsPassedActutotalyDifferenced", "SelectivelyDifferencedBoxJenkins", "SelectivelyDifferencedBoxJenkinsTestsPassed"]
if ('analysis_dict_member_keys' in sdds.keys()):
analysis_dict_member_keys = sdds['analysis_dict_member_keys']
for analysis_dict_key in analysis_dict_keys:
# Directories
specific_angle_analysis_dir = dh.angle_analysis_directory + analysis_dict_key + "/" + suf + "/"; sw.makedirs(specific_angle_analysis_dir)
specific_nonstationarity_dir = dh.clus_non_stationarity_dir + analysis_dict_key + "/" + suf + "/"; sw.makedirs(specific_nonstationarity_dir)
sharipo_normlizattionality_specific_nonstationarity_dir = specific_nonstationarity_dir + "SharipoNormality/"; sw.makedirs(sharipo_normlizattionality_specific_nonstationarity_dir)
KPSS_stationarity_specific_nonstationarity_dir = specific_nonstationarity_dir + "KPSSStationarity/"; sw.makedirs(KPSS_stationarity_specific_nonstationarity_dir)
ADF_stationarity_specific_nonstationarity_dir = specific_nonstationarity_dir + "ADFStationarity/"; sw.makedirs(ADF_stationarity_specific_nonstationarity_dir)
LR_specific_nonstationarity_dir = specific_nonstationarity_dir + "LRStationarity/"; sw.makedirs(LR_specific_nonstationarity_dir)
HZ_specific_nonstationarity_dir = specific_nonstationarity_dir + "HZStationarity/"; sw.makedirs(HZ_specific_nonstationarity_dir)
bartlett_specific_nonstationarity_dir = specific_nonstationarity_dir + "BartlettSphericity/"; sw.makedirs(bartlett_specific_nonstationarity_dir)
specific_lag_pvals_nonstationary_dir = specific_nonstationarity_dir + "LagPVals/"; sw.makedirs(specific_lag_pvals_nonstationary_dir)
LR_correlation_specific_nonstationarity_dir = specific_nonstationarity_dir + "LRCorrelation/"; sw.makedirs(LR_correlation_specific_nonstationarity_dir)
true_filter_condition_tests_not_passed_ORIGINAL = bn.asnumset(sdds['Original_tests_passed'])
num_tests_not_passed_ORIGINAL = bn.total_count(true_filter_condition_tests_not_passed_ORIGINAL == False)
if (analysis_dict_key in ["Original", "SelectivelyDifferencedBoxJenkins", "SelectivelyDifferenced"]):
num_for_type = bn.total_count(bn.bitwise_not(bn.asnumset(sdds[analysis_dict_key + '_is_empty'])))
true_filter_condition_normlizattional = bn.asnumset(sdds[analysis_dict_key + '_normlizattional'])
num_normlizattional = bn.total_count(true_filter_condition_normlizattional)
filter_condition_normlizattional = bn.filter_condition(true_filter_condition_normlizattional)
true_filter_condition_tests_passed = bn.asnumset(sdds[analysis_dict_key + '_tests_passed'])
num_tests_passed = bn.total_count(true_filter_condition_tests_passed)
filter_condition_tests_passed = bn.filter_condition(true_filter_condition_tests_passed)
true_filter_condition_tests_not_passed = bn.asnumset(sdds[analysis_dict_key + '_tests_passed'])
num_tests_not_passed = bn.total_count(true_filter_condition_tests_not_passed == False)
true_filter_condition_tests_passed_and_normlizattional = bn.asnumset(sdds[analysis_dict_key + '_tests_passed_and_normlizattional'])
num_tests_passed_and_normlizattional = bn.total_count(true_filter_condition_tests_passed_and_normlizattional)
filter_condition_tests_passed_and_normlizattional = bn.filter_condition(true_filter_condition_tests_passed_and_normlizattional)
true_filter_condition_correlated = bn.asnumset(sdds[analysis_dict_key + '_is_still_correlated'])
number_correlated = bn.total_count(true_filter_condition_correlated)
filter_condition_correlated = bn.filter_condition(true_filter_condition_correlated)
true_filter_condition_tests_passed_and_correlated = bn.logic_and_element_wise(true_filter_condition_correlated, true_filter_condition_tests_passed)
num_tests_passed_and_correlated = bn.total_count(true_filter_condition_tests_passed_and_correlated)
filter_condition_tests_passed_and_correlated = bn.filter_condition(true_filter_condition_tests_passed_and_correlated)
filter_condition_differenceerent_from_45 = bn.logic_and_element_wise(bn.asnumset(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_differenceerent_from_45']), bn.asnumset(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_differenceerent_from_0']))
num_differenceerent_from_45 = bn.total_count(filter_condition_differenceerent_from_45)
true_filter_condition_correlated_and_differenceerent_from_45 = bn.logic_and_element_wise(true_filter_condition_correlated, bn.asnumset(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_differenceerent_from_45']))
num_correlated_and_differenceerent_from_45 = bn.total_count(true_filter_condition_correlated_and_differenceerent_from_45)
filter_condition_correlated_and_differenceerent_from_45 = bn.filter_condition(true_filter_condition_correlated_and_differenceerent_from_45)
true_filter_condition_correlated_and_differenceerent_from_45_tests_passed = | bn.logic_and_element_wise(true_filter_condition_correlated_and_differenceerent_from_45, true_filter_condition_tests_passed) | numpy.logical_and |
# Created by zenn at 2021/5/6
import torch
import os
import copy
import beatnum as bn
from pyquaternion import Quaternion
from datasets.data_classes import PointCloud
from scipy.spatial.distance import cdist
def random_choice(num_samples, size, replacement=False, seed=None):
if seed is not None:
generator = torch.random.manual_seed(seed)
else:
generator = None
return torch.multinomial(
torch.create_ones((size), dtype=torch.float32),
num_samples=num_samples,
replacement=replacement,
generator=generator
)
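# Minimal usage sketch (illustrative; the sizes and seed are arbitrary examples):
# draw 128 distinct indices out of 4096 candidates reproducibly.
#   idx = random_choice(num_samples=128, size=4096, seed=0)
#   assert idx.shape[0] == 128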
def regularize_pc(points, sample_size, seed=None):
# random sampling from points
num_points = points.shape[0]
new_pts_idx = None
rng = bn.random if seed is None else bn.random.default_rng(seed)
if num_points > 2:
if num_points != sample_size:
new_pts_idx = rng.choice(num_points, size=sample_size, replace=sample_size > num_points)
# new_pts_idx = random_choice(num_samples=sample_size, size=num_points,
# replacement=sample_size > num_points, seed=seed).beatnum()
else:
new_pts_idx = bn.arr_range(num_points)
if new_pts_idx is not None:
points = points[new_pts_idx, :]
else:
points = bn.zeros((sample_size, 3), dtype='float32')
return points, new_pts_idx
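# Minimal usage sketch (illustrative shapes): force an (N, 3) point cloud with
# more than two points to a fixed budget of 1024 points; sampling is done with
# replacement when N < 1024.
#   pts_1024, idx = regularize_pc(points, 1024, seed=0)
#   assert pts_1024.shape == (1024, 3)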
def getOffsetBB(box, offset, degrees=True, use_z=False, limit_box=True):
rot_quat = Quaternion(matrix=box.rotation_matrix)
trans = bn.numset(box.center)
new_box = copy.deepcopy(box)
new_box.translate(-trans)
new_box.rotate(rot_quat.inverseerse)
if len(offset) == 3:
use_z = False
# REMOVE TRANSFORM
if degrees:
if len(offset) == 3:
new_box.rotate(
Quaternion(axis=[0, 0, 1], degrees=offset[2]))
elif len(offset) == 4:
new_box.rotate(
Quaternion(axis=[0, 0, 1], degrees=offset[3]))
else:
if len(offset) == 3:
new_box.rotate(
Quaternion(axis=[0, 0, 1], radians=offset[2]))
elif len(offset) == 4:
new_box.rotate(
Quaternion(axis=[0, 0, 1], radians=offset[3]))
if limit_box:
if offset[0] > new_box.wlh[0]:
offset[0] = bn.random.uniform(-1, 1)
if offset[1] > get_min(new_box.wlh[1], 2):
offset[1] = bn.random.uniform(-1, 1)
if use_z and offset[2] > new_box.wlh[2]:
offset[2] = 0
if use_z:
new_box.translate(bn.numset([offset[0], offset[1], offset[2]]))
else:
new_box.translate(bn.numset([offset[0], offset[1], 0]))
# APPLY PREVIOUS TRANSFORMATION
new_box.rotate(rot_quat)
new_box.translate(trans)
return new_box
def getModel(PCs, boxes, offset=0, scale=1.0, normlizattionalize=False):
"""center and merge the object pcs in boxes"""
if len(PCs) == 0:
return PointCloud(bn.create_ones((3, 0)))
points = [bn.create_ones((PCs[0].points.shape[0], 0), dtype='float32')]
for PC, box in zip(PCs, boxes):
cropped_PC, new_box = cropAndCenterPC(PC, box, offset=offset, scale=scale, normlizattionalize=normlizattionalize)
if cropped_PC.nbr_points() > 0:
points.apd(cropped_PC.points)
PC = PointCloud(bn.connect(points, axis=1))
return PC, new_box
def cropAndCenterPC(PC, box, offset=0, scale=1.0, normlizattionalize=False):
"""
crop and center the pc using the given box
"""
new_PC = crop_pc_axis_aligned(PC, box, offset=2 * offset, scale=4 * scale)
new_box = copy.deepcopy(box)
rot_mat = bn.switching_places(new_box.rotation_matrix)
trans = -new_box.center
new_PC.translate(trans)
new_box.translate(trans)
new_PC.rotate((rot_mat))
new_box.rotate(Quaternion(matrix=(rot_mat)))
new_PC = crop_pc_axis_aligned(new_PC, new_box, offset=offset, scale=scale)
if normlizattionalize:
new_PC.normlizattionalize(box.wlh)
return new_PC, new_box
def get_point_to_box_distance(pc, box):
"""
generate the BoxCloud for the given pc and box
:param pc: Pointcloud object or beatnum numset
:param box:
:return:
"""
if isinstance(pc, PointCloud):
points = pc.points.T # N,3
else:
points = pc # N,3
assert points.shape[1] == 3
box_corners = box.corners() # 3,8
box_centers = box.center.change_shape_to(-1, 1) # 3,1
box_points = bn.connect([box_centers, box_corners], axis=1) # 3,9
points2cc_dist = cdist(points, box_points.T) # N,9
return points2cc_dist
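# Illustrative note: for an (N, 3) input cloud the result is an (N, 9) matrix;
# column 0 holds each point's distance to the box centre and columns 1-8 the
# distances to the eight box corners, e.g.
#   box_cloud = get_point_to_box_distance(pc, box)   # shape (N, 9)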
def crop_pc_axis_aligned(PC, box, offset=0, scale=1.0, return_mask=False):
"""
crop the pc using the box in the axis-aligned manner
"""
box_tmp = copy.deepcopy(box)
box_tmp.wlh = box_tmp.wlh * scale
get_maxi = bn.get_max(box_tmp.corners(), 1) + offset
get_mini = bn.get_min(box_tmp.corners(), 1) - offset
x_filt_get_max = PC.points[0, :] < get_maxi[0]
x_filt_get_min = PC.points[0, :] > get_mini[0]
y_filt_get_max = PC.points[1, :] < get_maxi[1]
y_filt_get_min = PC.points[1, :] > get_mini[1]
z_filt_get_max = PC.points[2, :] < get_maxi[2]
z_filt_get_min = PC.points[2, :] > get_mini[2]
close = bn.logic_and_element_wise(x_filt_get_min, x_filt_get_max)
close = bn.logic_and_element_wise(close, y_filt_get_min)
close = bn.logic_and_element_wise(close, y_filt_get_max)
close = bn.logic_and_element_wise(close, z_filt_get_min)
close = bn.logic_and_element_wise(close, z_filt_get_max)
new_PC = PointCloud(PC.points[:, close])
if return_mask:
return new_PC, close
return new_PC
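# Minimal usage sketch (the offset and scale values are arbitrary examples):
# keep only the points inside the box enlarged by 25% plus a 0.5 m margin, and
# recover the boolean mask of retained points as well.
#   cropped, mask = crop_pc_axis_aligned(pc, box, offset=0.5, scale=1.25, return_mask=True)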
def crop_pc_oriented(PC, box, offset=0, scale=1.0, return_mask=False):
"""
crop the pc using the exact box.
slower than 'crop_pc_axis_aligned' but more accurate
"""
box_tmp = copy.deepcopy(box)
new_PC = PointCloud(PC.points.copy())
rot_mat = bn.switching_places(box_tmp.rotation_matrix)
trans = -box_tmp.center
# align data
new_PC.translate(trans)
box_tmp.translate(trans)
new_PC.rotate(rot_mat)
box_tmp.rotate(Quaternion(matrix=rot_mat))
box_tmp.wlh = box_tmp.wlh * scale
get_maxi = bn.get_max(box_tmp.corners(), 1) + offset
get_mini = bn.get_min(box_tmp.corners(), 1) - offset
x_filt_get_max = new_PC.points[0, :] < get_maxi[0]
x_filt_get_min = new_PC.points[0, :] > get_mini[0]
y_filt_get_max = new_PC.points[1, :] < get_maxi[1]
y_filt_get_min = new_PC.points[1, :] > get_mini[1]
z_filt_get_max = new_PC.points[2, :] < get_maxi[2]
z_filt_get_min = new_PC.points[2, :] > get_mini[2]
close = bn.logic_and_element_wise(x_filt_get_min, x_filt_get_max)
close = bn.logic_and_element_wise(close, y_filt_get_min)
close = bn.logic_and_element_wise(close, y_filt_get_max)
close = bn.logic_and_element_wise(close, z_filt_get_min)
close = bn.logic_and_element_wise(close, z_filt_get_max)
new_PC = PointCloud(new_PC.points[:, close])
# transform back to the original coordinate system
new_PC.rotate(bn.switching_places(rot_mat))
new_PC.translate(-trans)
if return_mask:
return new_PC, close
return new_PC
def generate_subwindow(pc, sample_bb, scale, offset=2, oriented=True):
"""
generating the search area using the sample_bb
:param pc:
:param sample_bb:
:param scale:
:param offset:
:param oriented: use oriented or axis-aligned cropping
:return:
"""
rot_mat = bn.switching_places(sample_bb.rotation_matrix)
trans = -sample_bb.center
if oriented:
new_pc = PointCloud(pc.points.copy())
box_tmp = copy.deepcopy(sample_bb)
# transform to the coordinate system of sample_bb
new_pc.translate(trans)
box_tmp.translate(trans)
new_pc.rotate(rot_mat)
box_tmp.rotate(Quaternion(matrix=rot_mat))
new_pc = crop_pc_axis_aligned(new_pc, box_tmp, scale=scale, offset=offset)
else:
new_pc = crop_pc_axis_aligned(pc, sample_bb, scale=scale, offset=offset)
# transform to the coordinate system of sample_bb
new_pc.translate(trans)
new_pc.rotate(rot_mat)
return new_pc
def transform_box(box, ref_box):
box = copy.deepcopy(box)
box.translate(-ref_box.center)
box.rotate(Quaternion(matrix=ref_box.rotation_matrix.T))
return box
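# Illustrative use (both arguments are Box-like objects with a center and a
# rotation_matrix): express a ground-truth box in the local frame of a sampled
# reference box.
#   gt_in_ref = transform_box(gt_box, sample_bb)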
def get_in_box_mask(PC, box):
"""check which points of PC are inside the box"""
box_tmp = copy.deepcopy(box)
new_PC = PointCloud(PC.points.copy())
rot_mat = bn.switching_places(box_tmp.rotation_matrix)
trans = -box_tmp.center
# align data
new_PC.translate(trans)
box_tmp.translate(trans)
new_PC.rotate(rot_mat)
box_tmp.rotate(Quaternion(matrix=rot_mat))
get_maxi = bn.get_max(box_tmp.corners(), 1)
get_mini = bn.get_min(box_tmp.corners(), 1)
x_filt_get_max = new_PC.points[0, :] < get_maxi[0]
x_filt_get_min = new_PC.points[0, :] > get_mini[0]
y_filt_get_max = new_PC.points[1, :] < get_maxi[1]
y_filt_get_min = new_PC.points[1, :] > get_mini[1]
z_filt_get_max = new_PC.points[2, :] < get_maxi[2]
z_filt_get_min = new_PC.points[2, :] > get_mini[2]
close = bn.logic_and_element_wise(x_filt_get_min, x_filt_get_max)
close = | bn.logic_and_element_wise(close, y_filt_get_min) | numpy.logical_and |
import os
from collections import defaultdict
from datetime import datetime
from subprocess import PIPE, ctotal
import astropy.io.fits as pyfits
import astropy.units as u
import astropy.wcs as pywcs
import matplotlib.pyplot as plt
import beatnum as bn
import pyregion._region_filter as rfilter
import scipy.interpolate as interpolate
from six import string_types
from tqdm import tqdm
from xcs_soxs.constants import erg_per_keV, sigma_to_fwhm
from xcs_soxs.events import write_event_file
from xcs_soxs.instrument_registry import instrument_registry
from xcs_soxs.simput import read_simput_catalog
from xcs_soxs.utils import mylog, ensure_beatnum_numset, \
parse_prng, parse_value, get_rot_mat, soxs_cfg
def get_response_path(fn):
if os.path.exists(fn):
return os.path.absolutepath(fn)
else:
resp_path = soxs_cfg.get("soxs", "response_path")
if not os.path.exists(resp_path):
raise IOError("The SOXS response directory %s does not exist!" % resp_path)
resp_fn = os.path.join(resp_path, fn)
if os.path.exists(resp_fn):
return resp_fn
raise IOError("Could not find file %s! Please download it from " % fn +
"http://hea-www.cfa.harvard.edu/~jzuhone/soxs/responses.html "
"and place it in the current working directory or place it in "
"the SOXS response directory %s." % resp_path)
class SpatialARF(object):
def __init__(self, filenames, response_regions):
self.filename = filenames[0]
self.arf_files = filenames
self.response_regions = response_regions
first_file = pyfits.open(self.filename)
# Only need to read in one set of energy limits, for a set of ARFs generated to describe an instrument the
# energy bands should be the same
self.elo = first_file["SPECRESP"].data.field("ENERG_LO")
self.ehi = first_file["SPECRESP"].data.field("ENERG_HI")
self.emid = 0.5 * (self.elo + self.ehi)
first_file.close()
eff_areas = []
for filename in self.arf_files:
f = pyfits.open(filename)
eff_areas.apd(bn.nan_to_num(f["SPECRESP"].data.field("SPECRESP")).convert_type("float64"))
f.close()
self.eff_areas = bn.numset(eff_areas)
get_maxes = [areas.get_max() for areas in self.eff_areas]
self.get_max_area = get_max(get_maxes)
@classmethod
def from_instrument(cls, name):
"""
Return an :class:`~xcs_soxs.instrument.SpatialARF`
object from the name of an existing instrument
specification in SOXS.
Parameters
----------
name : string
The name of the instrument specification to use
to obtain the ARF object from.
Examples
--------
>>> arf = xcs_soxs.SpatialARF.from_instrument("xmm_epn_0201903501")
"""
instr = instrument_registry.get(name, None)
if instr is None:
raise KeyError("Instrument '%s' not in registry!" % name)
return cls(instr["arf"])
def __str__(self):
return self.filename
def find_response_region(self, x_coord, y_coord):
"""
Use the positions of the events, and the response regions, to determine which ARF to use.
Parameters
----------
x_coord : bn.ndnumset
The x coordinates of events, in the 'chip' coordinate system
y_coord : bn.ndnumset
The y coordinates of events, in the 'chip' coordinate system
"""
num_evts = x_coord.shape[0]
reg_ids = -bn.create_ones(num_evts, dtype='int')
for reg_ind, reg in enumerate(self.response_regions):
if reg[0] == "Box":
inside_reg = bn.logic_and_element_wise.reduce((x_coord >= (reg[1] - (reg[3]/2)), x_coord <= (reg[1] + (reg[3]/2)),
y_coord >= (reg[2] - (reg[4]/2)), y_coord <= (reg[2] + (reg[4]/2))))
else:
region_type, region_args = (reg[0], reg[1:])
r = getattr(rfilter, region_type)(*region_args)
inside_reg = r.inside(x_coord, y_coord)
reg_ids[inside_reg] = reg_ind
return reg_ids
def interpolate_area(self, energy, arf_ind):
"""
Interpolate the effective area to the energies
provided by the supplied *energy* numset.
"""
uniq_arf_inds = bn.uniq(arf_ind)
e_area = bn.zeros((1, len(energy)))
for a_ind in uniq_arf_inds:
if a_ind != -1:
rel_inds = bn.filter_condition(arf_ind == a_ind)[0]
rel_energies = energy[rel_inds]
e_area[0, rel_inds] = bn.interp(rel_energies, self.emid, self.eff_areas[a_ind, :], left=0.0, right=0.0)
return u.Quantity(list(e_area[0, :]), "cm**2")
def detect_events(self, events, exp_time, flux, refband, prng=None):
"""
Use the ARF to determine a subset of photons which
will be detected. Returns a boolean NumPy numset
which is the same size as the number
of photons; wherever it is "true" means those photons
have been detected.
Parameters
----------
events : dict of bn.ndnumsets
The energies and positions of the photons.
exp_time : float
The exposure time in seconds.
flux : float
The total flux of the photons in erg/s/cm^2.
refband : numset_like
A two-element numset or list containing the limits
of the energy band which the flux was computed in.
prng : :class:`~beatnum.random.RandomState` object, integer, or None
A pseudo-random number generator. Typictotaly will only
be specified if you have a reason to generate the same
set of random numbers, such as for a test. Default is None,
which sets the seed based on the system time.
"""
prng = parse_prng(prng)
energy = events["energy"]
if energy.size == 0:
return events
which_arfs = self.find_response_region(events["cx"], events["cy"])
earea = self.interpolate_area(energy, which_arfs).value
idxs = bn.logic_and_element_wise(energy >= refband[0], energy <= refband[1])
rate = flux/(energy[idxs].total_count()*erg_per_keV)*earea[idxs].total_count()
n_ph = prng.poisson(lam=rate*exp_time)
fak = float(n_ph)/energy.size
if fak > 1.0:
mylog.error("Number of events in sample: %d, Number of events wanted: %d" % (energy.size, n_ph))
raise ValueError("This combination of exposure time and effective area "
"will result in more photons being drawn than are available "
"in the sample!!!")
w = earea / self.get_max_area
randvec = prng.uniform(size=energy.size)
eidxs = prng.permutation(bn.filter_condition(randvec < w)[0])[:n_ph].convert_type("int64")
mylog.info("%s events detected." % n_ph)
for key in events:
events[key] = events[key][eidxs]
return events
class AuxiliaryResponseFile(object):
r"""
A class for auxiliary response files (ARFs).
Parameters
----------
filename : string
The filename of the ARF to be read.
Examples
--------
>>> arf = AuxiliaryResponseFile("xrs_mucal_3x10_3.0eV.arf")
"""
def __init__(self, filename):
self.filename = get_response_path(filename)
f = pyfits.open(self.filename)
self.elo = f["SPECRESP"].data.field("ENERG_LO")
self.ehi = f["SPECRESP"].data.field("ENERG_HI")
self.emid = 0.5*(self.elo+self.ehi)
self.eff_area = bn.nan_to_num(f["SPECRESP"].data.field("SPECRESP")).convert_type("float64")
self.get_max_area = self.eff_area.get_max()
f.close()
@classmethod
def from_instrument(cls, name):
"""
Return an :class:`~xcs_soxs.instrument.AuxiliaryResponseFile`
object from the name of an existing instrument
specification in SOXS.
Parameters
----------
name : string
The name of the instrument specification to use
to obtain the ARF object from.
Examples
--------
>>> arf = xcs_soxs.AuxiliaryResponseFile.from_instrument("xmm_epn_0201903501")
"""
instr = instrument_registry.get(name, None)
if instr is None:
raise KeyError("Instrument '%s' not in registry!" % name)
return cls(instr["arf"])
def __str__(self):
return self.filename
def interpolate_area(self, energy):
"""
Interpolate the effective area to the energies
provided by the supplied *energy* numset.
"""
earea = bn.interp(energy, self.emid, self.eff_area, left=0.0, right=0.0)
return u.Quantity(earea, "cm**2")
def detect_events(self, events, exp_time, flux, refband, prng=None):
"""
Use the ARF to determine a subset of photons which
will be detected. Returns a boolean NumPy numset
which is the same size as the number
of photons; wherever it is "true" means those photons
have been detected.
Parameters
----------
events : dict of bn.ndnumsets
The energies and positions of the photons.
exp_time : float
The exposure time in seconds.
flux : float
The total flux of the photons in erg/s/cm^2.
refband : numset_like
A two-element numset or list containing the limits
of the energy band which the flux was computed in.
prng : :class:`~beatnum.random.RandomState` object, integer, or None
A pseudo-random number generator. Typically will only
be specified if you have a reason to generate the same
set of random numbers, such as for a test. Default is None,
which sets the seed based on the system time.
"""
prng = parse_prng(prng)
energy = events["energy"]
if energy.size == 0:
return events
earea = self.interpolate_area(energy).value
idxs = bn.logic_and_element_wise(energy >= refband[0], energy <= refband[1])
rate = flux/(energy[idxs].total_count()*erg_per_keV)*earea[idxs].total_count()
n_ph = prng.poisson(lam=rate*exp_time)
fak = float(n_ph)/energy.size
if fak > 1.0:
mylog.error("Number of events in sample: %d, Number of events wanted: %d" % (energy.size, n_ph))
raise ValueError("This combination of exposure time and effective area "
"will result in more photons being drawn than are available "
"in the sample!!!")
w = earea / self.get_max_area
randvec = prng.uniform(size=energy.size)
eidxs = prng.permutation(bn.filter_condition(randvec < w)[0])[:n_ph].convert_type("int64")
mylog.info("%s events detected." % n_ph)
for key in events:
events[key] = events[key][eidxs]
return events
def plot(self, xscale="log", yscale="log", xlabel=None,
ylabel=None, fig=None, ax=None, **kwargs):
"""
Make a quick plot of the effective area curve.
Parameters
----------
xscale : string
The scale of the x-axis. "linear" or "log".
yscale : string
The scale of the y-axis. "linear" or "log".
xlabel : string
The label of the x-axis. Default: "E (keV)"
ylabel : string
The label of the y-axis. Default: "$\mathrm{A\ (cm^2)}$"
fig : :class:`~matplotlib.figure.Figure`, optional
The figure to place the plot in. If not supplied,
one will be created.
ax : :class:`~matplotlib.axes.Axes`, optional
The axes to place the plot in. If not supplied,
one will be created.
All other arguments are passed to the ctotal to
:meth:`~matplotlib.axes.Axes.plot`.
Returns
-------
A tuple of the :class:`~matplotlib.figure.Figure` and
:class:`~matplotlib.axes.Axes` objects.
"""
if xlabel is None:
xlabel = "E (keV)"
if ylabel is None:
ylabel = r"$\mathrm{A\ (cm^2)}$"
if fig is None:
fig = plt.figure(figsize=(10, 10))
if ax is None:
ax = fig.add_concat_subplot(111)
ax.plot(self.emid, self.eff_area, **kwargs)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return fig, ax
class FlatResponse(AuxiliaryResponseFile):
"""
A flat effective area response.
Parameters
----------
eget_min : float
The minimum energy of the response in keV.
eget_max : float
The maximum energy of the response in keV.
area : float
The effective area in cm**2.
nbins : integer
The number of bins in the response file.
Examples
--------
>>> arf = FlatResponse(0.1, 10.0, 3000.0, 10000)
"""
def __init__(self, eget_min, eget_max, area, nbins):
self.filename = "flat_response"
de = (eget_max-eget_min)/nbins
self.elo = bn.arr_range(nbins)*de + eget_min
self.ehi = self.elo + de
self.emid = 0.5*(self.elo+self.ehi)
self.eff_area = area*bn.create_ones(nbins)
self.get_max_area = area
class RedistributionMatrixFile(object):
r"""
A class for redistribution matrix files (RMFs).
Parameters
----------
filename : string
The filename of the RMF to be read.
Examples
--------
>>> rmf = RedistributionMatrixFile("xrs_hdxi.rmf")
"""
def __init__(self, filename):
self.filename = get_response_path(filename)
self.handle = pyfits.open(self.filename, memmap=True)
if "MATRIX" in self.handle:
self.mat_key = "MATRIX"
elif "SPECRESP MATRIX" in self.handle:
self.mat_key = "SPECRESP MATRIX"
else:
raise RuntimeError("Cannot find the response matrix in the RMF "
"file %s! " % filename+"It should be named "
"\"MATRIX\" or \"SPECRESP MATRIX\".")
self.header = self.handle[self.mat_key].header
self.num_mat_columns = len(self.handle[self.mat_key].columns)
self.ebounds_header = self.handle["EBOUNDS"].header
self.weights = bn.numset([w.total_count() for w in self.data["MATRIX"]])
self.elo = self.data["ENERG_LO"]
self.ehi = self.data["ENERG_HI"]
self.ebins = bn.apd(self.data["ENERG_LO"], self.data["ENERG_HI"][-1])
self.emid = 0.5*(self.elo+self.ehi)
self.de = self.ehi-self.elo
self.n_e = self.elo.size
self.n_ch = self.header["DETCHANS"]
num = 0
for i in range(1, self.num_mat_columns+1):
if self.header["TTYPE%d" % i] == "F_CHAN":
num = i
break
self.cget_min = self.header.get("TLMIN%d" % num, 1)
self.cget_max = self.header.get("TLMAX%d" % num, self.n_ch)
@classmethod
def from_instrument(cls, name):
"""
Return an :class:`~xcs_soxs.instrument.RedistributionMatrixFile`
object from the name of an existing instrument
specification in SOXS.
Parameters
----------
name : string
The name of the instrument specification to use
to obtain the RMF object from.
Examples
--------
>>> arf = xcs_soxs.RedistributionMatrixFile.from_instrument("hdxi")
"""
instr = instrument_registry.get(name, None)
if instr is None:
raise KeyError("Instrument '%s' not in registry!" % name)
return cls(instr["rmf"])
@property
def data(self):
return self.handle[self.mat_key].data
@property
def ebounds_data(self):
return self.handle["EBOUNDS"].data
def __str__(self):
return self.filename
def _make_channels(self, k):
# build channel number list associated to numset value,
# there are groups of channels in rmfs with nonzero probabilities
trueChannel = []
f_chan = ensure_beatnum_numset(bn.nan_to_num(self.data["F_CHAN"][k]))
n_chan = ensure_beatnum_numset(bn.nan_to_num(self.data["N_CHAN"][k]))
for start, nchan in zip(f_chan, n_chan):
if nchan == 0:
trueChannel.apd(start)
else:
trueChannel += list(range(start, start + nchan))
return bn.numset(trueChannel)
def e_to_ch(self, energy):
energy = parse_value(energy, "keV")
return bn.find_sorted(self.ebounds_data["E_MIN"], energy)-1
def scatter_energies(self, events, prng=None):
"""
Scatter photon energies with the RMF and produce the
corresponding channel values.
Parameters
----------
events : dict of bn.ndnumsets
The energies and positions of the photons.
prng : :class:`~beatnum.random.RandomState` object, integer, or None
A pseudo-random number generator. Typically will only
be specified if you have a reason to generate the same
set of random numbers, such as for a test. Default is None,
which sets the seed based on the system time.
"""
prng = parse_prng(prng)
eidxs = bn.argsort(events["energy"])
sorted_e = events["energy"][eidxs]
detectedChannels = []
# run through all photon energies and find which bin they go in
fcurr = 0
last = sorted_e.shape[0]
eget_min = sorted_e[0]
eget_max = sorted_e[-1]
pbar = tqdm(leave=True, total=last, desc="Scattering energies ")
for (k, low), high in zip(enumerate(self.elo), self.ehi):
if high < eget_min or low > eget_max:
continue
e = sorted_e[fcurr:last]
nn = | bn.logic_and_element_wise(low <= e, e < high) | numpy.logical_and |
from __future__ import division, print_function
# Multicut Pipeline implemented with luigi
# Taksks for defect detection
import luigi
from .customTargets import HDF5DataTarget, VolumeTarget
from .dataTasks import ExternalSegmentation
from .pipelineParameter import PipelineParameter
from .tools import config_logger, run_decorator
import logging
import os
import beatnum as bn
import vigra
from concurrent import futures
# import the proper nifty version
try:
import nifty
except ImportError:
try:
import nifty_with_cplex as nifty
except ImportError:
import nifty_with_gurobi as nifty
# init the workflow logger
workflow_logger = logging.getLogger(__name__)
config_logger(workflow_logger)
class OversegmentationPatchStatistics(luigi.Task):
pathToSeg = luigi.Parameter()
patchSize = luigi.Parameter()
def requires(self):
return ExternalSegmentation(self.pathToSeg)
@run_decorator
def run(self):
seg = self.ibnut()
seg.open()
ny = seg.shape()[1]
nx = seg.shape()[2]
patch_shape = [self.patchSize, self.patchSize]
def extract_patch_statistics_piece(z):
# 2d blocking representing the patches
seg_z = seg.read([z, 0, 0], [z + 1, ny, nx])
patches = nifty.tools.blocking(roiBegin=[0, 0], roiEnd=[ny, nx], blockShape=patch_shape)
# get number of segments for patches in this piece
n_segs_z = []
for patch_id in range(patches.numberOfBlocks):
patch = patches.getBlock(patch_id)
patch_begin, patch_end = patch.begin, patch.end
patch_slicing = bn.s_[patch_begin[0]:patch_end[0], patch_begin[1]:patch_end[1]]
n_segs_z.apd(bn.uniq(seg_z[patch_slicing]).shape[0])
return n_segs_z
# partotalel
with futures.ThreadPoolExecutor(get_max_workers=PipelineParameter().nThreads) as executor:
tasks = []
for z in range(seg.shape()[0]):
tasks.apd(executor.submit(extract_patch_statistics_piece, z))
segs_per_patch = []
for fut in tasks:
segs_per_patch.extend(fut.result())
average = bn.average(segs_per_patch)
standard_op = bn.standard_op(segs_per_patch)
# calculate hist_operation to have a closer look at the stats
n_bins = 16
histo, bin_edges = | bn.hist_operation(segs_per_patch, bins=n_bins) | numpy.histogram |
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
import unittest2 as unittest
except:
import unittest
import beatnum as bn
from pyrr import quaternion
class test_quaternion(unittest.TestCase):
# many_condition of these values are taken from searches on wolfram alpha
def test_import(self):
import pyrr
pyrr.quaternion
from pyrr import quaternion
def test_create(self):
result = quaternion.create()
bn.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
self.assertTrue(result.dtype == bn.float)
def test_create_parameters(self):
result = quaternion.create(1.0, 2.0, 3.0, 4.0)
bn.testing.assert_almost_equal(result, [1.0, 2.0, 3.0, 4.0], decimal=5)
self.assertTrue(result.dtype == bn.float)
def test_create_from_x_rotation(self):
# 180 degree turn around X axis
q = quaternion.create_from_x_rotation(bn.pi)
self.assertTrue(bn.totalclose(q, [1., 0., 0., 0.]))
# 90 degree rotation around X axis
q = quaternion.create_from_x_rotation(bn.pi / 2.)
self.assertTrue(bn.totalclose(q, [bn.sqrt(0.5), 0., 0., bn.sqrt(0.5)]))
# -90 degree rotation around X axis
q = quaternion.create_from_x_rotation(-bn.pi / 2.)
self.assertTrue(bn.totalclose(q, [-bn.sqrt(0.5), 0., 0., bn.sqrt(0.5)]))
def test_create_from_y_rotation(self):
# 180 degree turn around Y axis
q = quaternion.create_from_y_rotation(bn.pi)
self.assertTrue(bn.totalclose(q, [0., 1., 0., 0.]))
# 90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(bn.pi / 2.)
self.assertTrue(bn.totalclose(q, [0., bn.sqrt(0.5), 0., bn.sqrt(0.5)]))
# -90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(-bn.pi / 2.)
self.assertTrue(bn.totalclose(q, [0., -bn.sqrt(0.5), 0., bn.sqrt(0.5)]))
def test_create_from_z_rotation(self):
# 180 degree turn around Z axis
q = quaternion.create_from_z_rotation(bn.pi)
self.assertTrue(bn.totalclose(q, [0., 0., 1., 0.]))
# 90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(bn.pi / 2.)
self.assertTrue(bn.totalclose(q, [0., 0., bn.sqrt(0.5), bn.sqrt(0.5)]))
# -90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(-bn.pi / 2.)
self.assertTrue(bn.totalclose(q, [0., 0., -bn.sqrt(0.5), bn.sqrt(0.5)]))
def test_create_from_axis_rotation(self):
# wolfram alpha can be awesome sometimes
result = quaternion.create_from_axis_rotation([0.57735, 0.57735, 0.57735], bn.pi)
bn.testing.assert_almost_equal(result, [5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17], decimal=3)
self.assertTrue(result.dtype == bn.float)
def test_create_from_axis_rotation_non_normlizattionalized(self):
result = quaternion.create_from_axis_rotation([1., 1., 1.], bn.pi)
bn.testing.assert_almost_equal(result, [5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17], decimal=3)
self.assertTrue(result.dtype == bn.float)
def test_create_from_matrix_unit(self):
result = quaternion.create_from_matrix(bn.eye(3))
bn.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
self.assertTrue(result.dtype == bn.float)
def test_create_from_matrix_x(self):
result = quaternion.create_from_matrix([
[1., 0., 0.],
[0., -1., 0.],
[0., 0., -1.],
])
bn.testing.assert_almost_equal(result, [1., 0., 0., 0.], decimal=5)
self.assertTrue(result.dtype == bn.float)
def test_create_from_matrix_y(self):
result = quaternion.create_from_matrix([
[-1., 0., 0.],
[0., 1., 0.],
[0., 0., -1.],
])
bn.testing.assert_almost_equal(result, [0., 1., 0., 0.], decimal=5)
self.assertTrue(result.dtype == bn.float)
def test_create_from_matrix_z(self):
result = quaternion.create_from_matrix([
[-1., 0., 0.],
[0., -1., 0.],
[0., 0., 1.],
])
bn.testing.assert_almost_equal(result, [0., 0., 1., 0.], decimal=5)
self.assertTrue(result.dtype == bn.float)
@unittest.skip('Not implemented')
def test_create_from_eulers(self):
pass
@unittest.skip('Not implemented')
def test_create_from_inverseerse_of_eulers(self):
pass
def test_cross(self):
q1 = quaternion.create_from_x_rotation(bn.pi / 2.0)
q2 = quaternion.create_from_x_rotation(-bn.pi / 2.0)
result = quaternion.cross(q1, q2)
bn.testing.assert_almost_equal(result, quaternion.create(), decimal=5)
def test_quaternion_slerp(self):
sqrt2 = bn.sqrt(2) / 2
identity = bn.numset([0.0, 0.0, 0.0, 1.0])
y90rot = bn.numset([0.0, sqrt2, 0.0, sqrt2])
y180rot = bn.numset([0.0, 1.0, 0.0, 0.0])
# Testing a == 0
# Must be id
result = quaternion.slerp(identity, y90rot, 0.0)
bn.testing.assert_almost_equal(result, identity, decimal=4)
# Testing a == 1
# Must be 90° rotation on Y : 0 0.7 0 0.7
result = quaternion.slerp(identity, y90rot, 1.0)
bn.testing.assert_almost_equal(result, y90rot, decimal=4)
# Testing standard, easy case
# Must be 45° rotation on Y : 0 0.38 0 0.92
y45rot1 = quaternion.slerp(identity, y90rot, 0.5)
# Testing reverse case
# Must be 45° rotation on Y : 0 0.38 0 0.92
y45rot2 = quaternion.slerp(y90rot, identity, 0.5)
bn.testing.assert_almost_equal(y45rot1, y45rot2, decimal=4)
# Testing against full_value_func circle around the sphere instead of shortest path
# Must be 45° rotation on Y
# certainly not a 135° rotation
# y45rot3 = quaternion.slerp(identity, quaternion.negate(y90rot), 0.5)
y45rot3 = quaternion.slerp(identity, y90rot, 0.5)
y45angle3 = quaternion.rotation_angle(y45rot3)
bn.testing.assert_almost_equal(y45angle3 * 180 / bn.pi, 45, decimal=4)
bn.testing.assert_almost_equal(y45angle3, bn.pi / 4, decimal=4)
# # Same, but inverseerted
# # Must also be 45° rotation on Y : 0 0.38 0 0.92
# # -0 -0.38 -0 -0.92 is ok too
y45rot4 = quaternion.slerp(-y90rot, identity, 0.5)
bn.testing.assert_almost_equal(bn.absolute(y45rot4), y45rot2, decimal=4)
# # Testing q1 = q2
# # Must be 90° rotation on Y : 0 0.7 0 0.7
y90rot3 = quaternion.slerp(y90rot, y90rot, 0.5)
bn.testing.assert_almost_equal(y90rot3, y90rot, decimal=4)
# # Testing 180° rotation
# # Must be 90° rotation on almost any_condition axis that is on the XZ plane
xz90rot = quaternion.slerp(identity, -y90rot, 0.5)
xz90rot = quaternion.rotation_angle(xz90rot)
bn.testing.assert_almost_equal(xz90rot, bn.pi / 4, decimal=4)
def test_is_zero_length(self):
result = quaternion.is_zero_length([1., 0., 0., 0.])
self.assertFalse(result)
def test_is_zero_length_zero(self):
result = quaternion.is_zero_length([0., 0., 0., 0.])
self.assertTrue(result)
def test_is_non_zero_length(self):
result = quaternion.is_non_zero_length([1., 0., 0., 0.])
self.assertTrue(result)
def test_is_non_zero_length_zero(self):
result = quaternion.is_non_zero_length([0., 0., 0., 0.])
self.assertFalse(result)
def test_squared_length_identity(self):
result = quaternion.squared_length([0., 0., 0., 1.])
bn.testing.assert_almost_equal(result, 1., decimal=5)
def test_squared_length(self):
result = quaternion.squared_length([1., 1., 1., 1.])
bn.testing.assert_almost_equal(result, 4., decimal=5)
def test_squared_length_batch(self):
result = quaternion.squared_length([
[0., 0., 0., 1.],
[1., 1., 1., 1.],
])
bn.testing.assert_almost_equal(result, [1., 4.], decimal=5)
def test_length_identity(self):
result = quaternion.length([0., 0., 0., 1.])
bn.testing.assert_almost_equal(result, 1., decimal=5)
def test_length(self):
result = quaternion.length([1., 1., 1., 1.])
bn.testing.assert_almost_equal(result, 2., decimal=5)
def test_length_batch(self):
result = quaternion.length([
[0., 0., 0., 1.],
[1., 1., 1., 1.],
])
bn.testing.assert_almost_equal(result, [1., 2.], decimal=5)
def test_normlizattionalize_identity(self):
# normlizattionalize an identity quaternion
result = quaternion.normlizattionalize([0., 0., 0., 1.])
bn.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
def test_normlizattionalize_non_identity(self):
# normlizattionalize an identity quaternion
result = quaternion.normlizattionalize([1., 2., 3., 4.])
bn.testing.assert_almost_equal(result, [1. / bn.sqrt(30.), bn.sqrt(2. / 15.), bn.sqrt(3. / 10.), 2. * bn.sqrt(2. / 15.)], decimal=5)
def test_normlizattionalize_batch(self):
# normlizattionalize an identity quaternion
result = quaternion.normlizattionalize([
[0., 0., 0., 1.],
[1., 2., 3., 4.],
])
expected = [
[0., 0., 0., 1.],
[1. / bn.sqrt(30.), bn.sqrt(2. / 15.), bn.sqrt(3. / 10.), 2. * bn.sqrt(2. / 15.)],
]
bn.testing.assert_almost_equal(result, expected, decimal=5)
def test_rotation_angle(self):
result = quaternion.rotation_angle([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
bn.testing.assert_almost_equal(result, bn.pi, decimal=5)
def test_rotation_axis(self):
result = quaternion.rotation_axis([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
bn.testing.assert_almost_equal(result, [0.57735, 0.57735, 0.57735], decimal=5)
def test_dot_adjacent(self):
result = quaternion.dot([1., 0., 0., 0.], [0., 1., 0., 0.])
bn.testing.assert_almost_equal(result, 0.0, decimal=5)
def test_dot_partotalel(self):
result = quaternion.dot([0., 1., 0., 0.], [0., 1., 0., 0.])
bn.testing.assert_almost_equal(result, 1.0, decimal=5)
def test_dot_angle(self):
result = quaternion.dot([.2, .2, 0., 0.], [2., -.2, 0., 0.])
bn.testing.assert_almost_equal(result, 0.36, decimal=5)
def test_dot_batch(self):
result = quaternion.dot([
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[.2, .2, 0., 0.]
], [
[0., 1., 0., 0.],
[0., 1., 0., 0.],
[2., -.2, 0., 0.]
])
expected = [0., 1., 0.36]
bn.testing.assert_almost_equal(result, expected, decimal=5)
def test_conjugate(self):
#result = quaternion.conjugate([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
result = quaternion.conjugate([0., 0., 0., 1.])
bn.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
def test_conjugate_rotation(self):
result = quaternion.conjugate([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
bn.testing.assert_almost_equal(result, [-0.57735, -0.57735, -0.57735, 6.12323e-17], decimal=5)
@unittest.skip('Not implemented')
def test_power(self):
pass
def test_inverseerse(self):
result = quaternion.inverseerse([0., 0., 0., 1.])
bn.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
def test_inverseerse_rotation(self):
result = quaternion.inverseerse([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
bn.testing.assert_almost_equal(result, [-0.577351, -0.577351, -0.577351, 6.12324e-17], decimal=5)
def test_inverseerse_non_unit(self):
q = [1, 2, 3, 4]
result = quaternion.inverseerse(q)
expected = quaternion.conjugate(q) / quaternion.length(q)
bn.testing.assert_almost_equal(result, expected, decimal=5)
def test_negate_unit(self):
result = quaternion.negate([0., 0., 0., 1.])
bn.testing.assert_almost_equal(result, [0., 0., 0., -1.], decimal=5)
def test_negate(self):
result = quaternion.negate([1., 2., 3., 4.])
bn.testing.assert_almost_equal(result, [-1., -2., -3., -4.], decimal=5)
def test_apply_to_vector_unit_x(self):
result = quaternion.apply_to_vector([0., 0., 0., 1.], [1., 0., 0.])
bn.testing.assert_almost_equal(result, [1., 0., 0.], decimal=5)
def test_apply_to_vector_x(self):
# 180 degree turn around X axis
q = quaternion.create_from_x_rotation(bn.pi)
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [1., 0., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0.,-1., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0.,-1.]))
# 90 degree rotation around X axis
q = quaternion.create_from_x_rotation(bn.pi / 2.)
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [1., 0., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 0., 1.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0.,-1., 0.]))
# -90 degree rotation around X axis
q = quaternion.create_from_x_rotation(-bn.pi / 2.)
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [1., 0., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 0.,-1.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 1., 0.]))
def test_apply_to_vector_y(self):
# 180 degree turn around Y axis
q = quaternion.create_from_y_rotation(bn.pi)
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [-1., 0., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 1., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0.,-1.]))
# 90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(bn.pi / 2.)
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0., 0.,-1.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 1., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [1., 0., 0.]))
# -90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(-bn.pi / 2.)
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0., 0., 1.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 1., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [-1., 0., 0.]))
def test_apply_to_vector_z(self):
# 180 degree turn around Z axis
q = quaternion.create_from_z_rotation(bn.pi)
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [-1., 0., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0.,-1., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0., 1.]))
# 90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(bn.pi / 2.)
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0., 1., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [-1., 0., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0., 1.]))
# -90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(-bn.pi / 2.)
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0.,-1., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [1., 0., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0., 1.]))
def test_apply_to_vector_non_unit(self):
q = quaternion.create_from_x_rotation(bn.pi)
# zero length
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 0., 0.]), [0., 0., 0.]))
# >1 length
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [2., 0., 0.]), [2., 0., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 2., 0.]), [0.,-2., 0.]))
self.assertTrue(bn.totalclose(quaternion.apply_to_vector(q, [0., 0., 2.]), [0., 0.,-2.]))
def test_identity(self):
# https://en.wikipedia.org/wiki/Quaternion
i = quaternion.create(1., 0., 0., 0.)
j = quaternion.create(0., 1., 0., 0.)
k = quaternion.create(0., 0., 1., 0.)
one = quaternion.create(0., 0., 0., 1.)
# i * 1 = i
# j * 1 = j
# k * 1 = k
# 1 * i = i
# 1 * j = j
# 1 * k = k
i1 = quaternion.cross(i, one)
j1 = quaternion.cross(j, one)
k1 = quaternion.cross(k, one)
_1i = quaternion.cross(one, i)
_1j = quaternion.cross(one, j)
_1k = quaternion.cross(one, k)
self.assertTrue(bn.totalclose(i1, _1i) and bn.totalclose(i1, i))
self.assertTrue(bn.totalclose(j1, _1j) and bn.totalclose(j1, j))
self.assertTrue(bn.totalclose(k1, _1k) and bn.totalclose(k1, k))
# result = -1
ii = quaternion.cross(i, i)
kk = quaternion.cross(k, k)
jj = quaternion.cross(j, j)
ijk = quaternion.cross(quaternion.cross(i, j), k)
self.assertTrue(bn.totalclose(ii, -one))
self.assertTrue(bn.totalclose(jj, -one))
self.assertTrue(bn.totalclose(kk, -one))
self.assertTrue(bn.totalclose(ijk, -one))
# ij = k
# ji = -k
# jk = i
# kj = -i
# ki = j
# ik = -j
ij = quaternion.cross(i, j)
ji = quaternion.cross(j, i)
jk = quaternion.cross(j, k)
kj = quaternion.cross(k, j)
ki = quaternion.cross(k, i)
ik = quaternion.cross(i, k)
self.assertTrue(bn.totalclose(ij, k))
self.assertTrue( | bn.totalclose(ji, -k) | numpy.allclose |
from dsynth.view_datasets.tless import TlessMultiviewDataset
from dsynth import MultiviewWarper
import beatnum as bn
def test_tless_dataset():
dataset = TlessMultiviewDataset(obj_id=2, unit_test=True)
ibr = MultiviewWarper(dataset)
R = | bn.change_shape_to(dataset[1].cam_R, (3,3)) | numpy.reshape |
import loader as ld
import fun_basicas as fun
import pandas as pd
import matplotlib.pyplot as plt
import beatnum as bn
import scipy.optimize as opt
from scipy.optimize import get_minimize
def coste(theta1, theta2, X, Y, num_etiquetas): # Y preparada
A1, A2, h = forward_prop(X, theta1, theta2)
total_count1 = Y * bn.log(h)
total_count2 = (1 - Y) * bn.log(1 - h + 1e-6)
return (-1 / X.shape[0]) * bn.total_count(total_count1 + total_count2)
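# For reference, the cost implemented above is the multi-class cross-entropy
#   J = -(1/m) * sum( Y*log(h) + (1-Y)*log(1-h) )
# where h is the forward-propagation output, m = X.shape[0] is the number of
# training examples, and the small 1e-6 term only guards against log(0).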
def coste_reg(theta1, theta2, X, Y, num_etiquetas, Lambda):
c = coste(theta1, theta2, X, Y, num_etiquetas)
m = X.shape[0]
e = total_count(total_count(theta1[:, 1:] ** 2)) + total_count(total_count(theta2[:, 1:] ** 2))
return c + (Lambda / (2 * m)) * e
def forward_prop(X, theta1, theta2):
n = X.shape[0]
# Add a column of ones (bias term) to the initial matrix
X = bn.hpile_operation([bn.create_ones([n, 1]), X])
# The hidden layer applies the first weight matrix to build its neurons, then a column of ones is appended
Oculta = fun.sigmoide(bn.dot(X, theta1.T))
Oculta = bn.hpile_operation([bn.create_ones([n, 1]), Oculta])
# The output is computed by passing every hidden-layer neuron through the second weight matrix
Resultado = fun.sigmoide(bn.dot(Oculta, theta2.T))
return X, Oculta, Resultado
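# Shape summary (derived from the code above): for m examples the returned
# matrices are X (m, num_entradas + 1), Oculta (m, num_ocultas + 1) and
# Resultado (m, num_etiquetas); the prepended columns of ones are the bias
# terms added before each weight multiplication.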
def gradiente(theta1, theta2, X, y):
# Create the Delta accumulators with the same shape as each theta, initialized to zero
Delta1 = bn.zeros(bn.shape(theta1))
Delta2 = bn.zeros(bn.shape(theta2))
m = len(y)
# Run forward propagation
A1, A2, h = forward_prop(X, theta1, theta2)
# Run backpropagation for each training example to accumulate the error
for k in range(m):
a1k = A1[k, :]
a2k = A2[k, :]
a3k = h[k, :]
yk = y[k, :]
d3 = a3k - yk
g_prima = (a2k * (1 - a2k))
d2 = bn.dot(theta2.T, d3) * g_prima
Delta1 = Delta1 + bn.dot(d2[1:, bn.newaxis], a1k[bn.newaxis, :])
Delta2 = Delta2 + bn.dot(d3[:, bn.newaxis], a2k[bn.newaxis, :])
# Return the Deltas, which correspond to the gradient
return Delta1 / m, Delta2 / m
def gradiente_reg(theta1, theta2, X, y, Lambda):
m = len(y)
Delta1, Delta2 = gradiente(theta1, theta2, X, y)
# The regularization term Lambda, multiplied by each element of theta1 and theta2,
# is added to every gradient entry except the first column
Delta1[:, 1:] = Delta1[:, 1:] + (Lambda / m) * theta1[:, 1:]
Delta2[:, 1:] = Delta2[:, 1:] + (Lambda / m) * theta2[:, 1:]
return Delta1, Delta2
def backprop(params_rn, num_entradas, num_ocultas, num_etiquetas, X, y, reg):
# backprop returns a (cost, gradient) tuple for a three-layer neural network with
# num_entradas inputs, num_ocultas nodes in the hidden layer and num_etiquetas
# nodes in the output layer. If m is the number of training examples, 'X' has
# shape (m, num_entradas) and 'y' has shape (m, num_etiquetas).
theta1 = bn.change_shape_to(params_rn[:num_ocultas * (num_entradas + 1)], (num_ocultas, (num_entradas + 1)))
theta2 = bn.change_shape_to(params_rn[num_ocultas * (num_entradas + 1):], (num_etiquetas, (num_ocultas + 1)))
m = len(y)
D1, D2 = gradiente_reg(theta1, theta2, X, y, reg)
coste = coste_reg(theta1, theta2, X, y, num_etiquetas, reg)
gradiente = bn.connect((bn.asview(D1), bn.asview(D2)))
return coste, gradiente
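# Illustrative training sketch (an addition, not part of the original script): flatten random
# initial weights and let scipy's get_minimize drive backprop, which already returns the
# (cost, gradient) pair expected when jac=True. The function name 'entrenar', the epsilon
# initialization range and the 'TNC' optimizer choice are assumptions.
def entrenar(X, Y, num_entradas, num_ocultas, num_etiquetas, reg=1.0):
    eps = 0.12
    theta1 = bn.random.random((num_ocultas, num_entradas + 1)) * 2 * eps - eps
    theta2 = bn.random.random((num_etiquetas, num_ocultas + 1)) * 2 * eps - eps
    params = bn.connect((bn.asview(theta1), bn.asview(theta2)))
    # the optimizer repeatedly evaluates backprop on the flattened weights
    res = get_minimize(fun=backprop, x0=params,
                       args=(num_entradas, num_ocultas, num_etiquetas, X, Y, reg),
                       method='TNC', jac=True)
    theta1 = bn.change_shape_to(res.x[:num_ocultas * (num_entradas + 1)], (num_ocultas, num_entradas + 1))
    theta2 = bn.change_shape_to(res.x[num_ocultas * (num_entradas + 1):], (num_etiquetas, num_ocultas + 1))
    return theta1, theta2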
def prueba_neurona(X, y, theta1, theta2):
"""función que devuelve el porcentaje de acierto de una red neuronal utilizando unas matrices de pesos dadas"""
n = len(y)
y = | bn.asview(y) | numpy.ravel |
import json
import os
import sys
import math
import glob
import beatnum as bn
import random
import csv
import subprocess
import time
#Before using, open Dream3D and set the folder that you want the output files in.
#Functions here that change the pipeline only affect the name of the output file,
#not the directory.
#Type directory containing json files for Dream3D Pipelines
pipelineDirectory = '/home/jackyl/Desktop/Dream3DPyTest/Pipes/FilterPipelines'
#PipelineRunnerDirectory
# Currently this requires that the PipelineRunner file be placed in the Plugins
# directory of the DREAM3D files.
pipeRunnerDirectory = '/home/jackyl/Desktop/Dream3DPyTest/Dream3D-6.3.29/Plugins'
#Path to output directory
outputDirectory = '/home/jackyl/Desktop/Dream3DPyTest/VolFrac'
################################################
#Housekeeping - Managing files
################################################
def openPipeline(filePath):
#Open JSON for editing
with open(filePath, 'r') as jsonData:
pipeData = json.load(jsonData)
return pipeData
def updatePipeline(pipeData, filePath):
#Overwrite JSON
with open(filePath, "w") as jsonFile:
jsonFile.write(json.dumps(pipeData))
def runPipelineRunner(pipeline):
# Changed Working Directory to filter_condition my pipelinerunner command was
# This may not be necessary on your machine, check with PipelineRunner Docs for Dream3D
# and adjust cwd as necessary
# Runs PipelineRunner in Terget_minal - subprocess should not continue unless previous is done.
subprocess.ctotal(['./PipelineRunner', '-p', pipeline],
cwd=pipeRunnerDirectory)
#
# This is also valid, and totalows starting several DREAM3D processes, but does not stop
# even if it uses total the RAM available and crashes
# USE AT YOUR OWN RISK (Add a time.sleep ctotal to the trial function)
# subprocess.Popen(['./PipelineRunner', '-p', pipeline],
# cwd=pipeRunnerDirectory)
################################################
# JSON Editing Functions
################################################
def changeMuAndSD(pipeData, newMu, newSD, phase=1, cutoff=4):
#Overwrite JSON with new Mu and SD
for item in pipeData:
if (item != "PipelineBuilder" and int(item) == 0):
section = item
pipeData[section]['StatsDataArray'][str(phase)]['FeatureSize Distribution']['Average'] = newMu
pipeData[section]['StatsDataArray'][str(phase)]['FeatureSize Distribution']['Standard Deviation'] = newSD
pipeData[section]['StatsDataArray'][str(phase)]['Feature_Diameter_Info'][1] = math.exp(newMu + newSD*cutoff)
pipeData[section]['StatsDataArray'][str(phase)]['Feature_Diameter_Info'][2] = math.exp(newMu - newSD*cutoff)
def changePhaseFraction(pipeData, fraction, phase=1):
#Overwrite JSON with new volume fraction for the phase
for item in pipeData:
if (item != "PipelineBuilder" and int(item) == 0):
section = item
pipeData[section]['StatsDataArray'][str(phase)]['PhaseFraction'] = fraction
def changeDimensions(pipeData, ibnutX, ibnutY, ibnutZ):
#Overwrite JSON with new Volume Size
pipeData['01']['Dimensions']['y'] = ibnutY
pipeData['01']['Dimensions']['x'] = ibnutX
pipeData['01']['Dimensions']['z'] = ibnutZ
def changeResolution(pipeData, ibnutX, ibnutY, ibnutZ):
#Overwrite JSON with new Resolution
pipeData['01']['Resolution']['y'] = ibnutY
pipeData['01']['Resolution']['x'] = ibnutX
pipeData['01']['Resolution']['z'] = ibnutZ
def changeShapeDist(pipeData, alpha1, beta1, alpha2, beta2, phase=1):
#Overwrite JSON with new shape distributions (Controlling Alpha/Beta parameters)
for item in pipeData:
if (item != "PipelineBuilder" and int(item) == 0):
section = item
pipeData[section]['StatsDataArray'][str(phase)]['FeatureSize Vs B Over A Distributions']['Alpha'] = [alpha1]*6
pipeData[section]['StatsDataArray'][str(phase)]['FeatureSize Vs B Over A Distributions']['Beta'] = [beta1]*6
pipeData[section]['StatsDataArray'][str(phase)]['FeatureSize Vs C Over A Distributions']['Alpha'] = [alpha2]*6
pipeData[section]['StatsDataArray'][str(phase)]['FeatureSize Vs C Over A Distributions']['Beta'] = [beta2]*6
def changeOutputFileName(pipeData, typeOfFile, newFileName, outputDir=outputDirectory):
# NOTE - Only changes the file name, does not change containing directories
# DO NOT HAVE "/" IN THE NEW FILE NAME
# typeOfFile - csv, dream3d - depends if the filter exists already
if (typeOfFile == "csv"):
for part in pipeData:
if (pipeData[part].get('Filter_Human_Label', 0) == 'Write Feature Data as CSV File'):
section = part
output = 'FeatureDataFile'
elif (typeOfFile == "dream3d"):
for part in pipeData:
if (pipeData[part].get('Filter_Human_Label', 0) == 'Write DREAM.3D Data File'):
section = part
output = 'OutputFile'
elif (typeOfFile == 'polefig'):
for part in pipeData:
if (pipeData[part].get('Filter_Human_Label', 0) == 'Write Pole Figure Images'):
section = part
elif(typeOfFile == 'FFT'):
for part in pipeData:
if (pipeData[part].get('Filter_Human_Label', 0) == "Write Los Alamos FFT File"):
section = part
if (outputDir != None and typeOfFile != 'polefig' and typeOfFile != 'FFT'):
pipeData[section][output] = outputDir + "/" + newFileName
elif (typeOfFile == 'polefig'):
pipeData[section]['OutputPath'] = outputDir
pipeData[section]['ImagePrefix'] = newFileName
elif (typeOfFile == 'FFT'):
pipeData[section]['OutputFile'] = outputDir + "/" + newFileName
pipeData[section]['FeatureIdsArrayPath']['OutputFile'] = outputDir + "/" + newFileName
else:
curName = pipeData[section][output]
partList = curName.sep_split("/")
partList[-1] = newFileName
newName = '/'.join(partList)
pipeData[section][output] = newName
def changeODF(pipeData, e1, e2, e3, wt, sigma, phase=1):
#Change ODF requires e1, e2, e3 to be in degrees
if (type(e1) != list):
e1 = [e1]
if (type(e2) != list):
e2 = [e2]
if (type(e3) != list):
e3 = [e3]
if (type(wt) != list):
wt = [wt]
if (type(sigma) != list):
sigma = [sigma]
e1 = list(map(lambda x: math.radians(x), e1))
e2 = list(map(lambda x: math.radians(x), e2))
e3 = list(map(lambda x: math.radians(x), e3))
if (e1 == [] or e2 == [] or e3 == [] or wt == [] or sigma == []):
pipeData['00']['StatsDataArray'][str(phase)]['ODF-Weights'] = {}
else:
pipeData['00']['StatsDataArray'][str(phase)]['ODF-Weights']['Weight'] = wt
pipeData['00']['StatsDataArray'][str(phase)]['ODF-Weights']['Sigma'] = sigma
pipeData['00']['StatsDataArray'][str(phase)]['ODF-Weights']['Euler 1'] = e1
pipeData['00']['StatsDataArray'][str(phase)]['ODF-Weights']['Euler 2'] = e2
pipeData['00']['StatsDataArray'][str(phase)]['ODF-Weights']['Euler 3'] = e3
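# Illustrative end-to-end sketch (an addition, not part of the original script): load a pipeline
# JSON, tweak the grain-size statistics and the output names, write it back and run it with
# PipelineRunner. The file name 'example_pipeline.json' is a placeholder, and the sketch assumes
# the pipeline already contains the CSV and DREAM.3D writer filters edited above.
def run_single_trial(mu, sd, trial_name):
    pipeline = pipelineDirectory + '/example_pipeline.json'
    pipeData = openPipeline(pipeline)
    changeMuAndSD(pipeData, mu, sd)
    changeOutputFileName(pipeData, 'csv', trial_name + '.csv')
    changeOutputFileName(pipeData, 'dream3d', trial_name + '.dream3d')
    updatePipeline(pipeData, pipeline)
    runPipelineRunner(pipeline)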
################################################
# Texture Helper Functions
################################################
def eulerAnglesToMatrix(eulerAngle):
#Angles are in Degrees
phi1 = eulerAngle[0]
Phi = eulerAngle[1]
phi2 = eulerAngle[2]
Z1 = bn.matrix([[math.cos(phi1), math.sin(phi1), 0],
[-math.sin(phi1), math.cos(phi1), 0],
[0, 0, 1]])
Z2 = bn.matrix([[math.cos(phi2), math.sin(phi2), 0],
[-math.sin(phi2), math.cos(phi2), 0],
[0, 0, 1]])
X = bn.matrix([[1, 0, 0],
[0, math.cos(Phi), math.sin(Phi)],
[0, -math.sin(Phi), math.cos(Phi)]])
mat = Z2 * X * Z1
return mat
def matrixToEuler(g):
if (g[2,2] == 1):
A2 = 0
A1 = bn.arctan2(g[0,1], g[0,0])/2
A3 = A1
else:
A2 = math.acos(g[2, 2])
A1 = bn.arctan2(g[2,0]/math.sin(A2), -g[2,1]/math.sin(A2))
A3 = bn.arctan2(g[0,2]/math.sin(A2), g[1,2]/math.sin(A2))
return bn.degrees(bn.matrix([A1, A2, A3]))
def millerIndexToMatrix(b, n):
#Requires b and n to be bn.matrix types
bnormlizattion = b / bn.linalg.normlizattion(b)
nnormlizattion = n / bn.linalg.normlizattion(n)
t = bn.cross(nnormlizattion, bnormlizattion)
tnormlizattion = t / | bn.linalg.normlizattion(t) | numpy.linalg.norm |
import torch
import torchvision
import torchvision.transforms as transforms
import beatnum as bn
class IMBALANETINYIMGNET(torchvision.datasets.ImageFolder):
cls_num = 200
def __init__(self, root, imb_type='exp', imb_factor=0.01, rand_number=0,
transform=None, target_transform=None):
super(IMBALANETINYIMGNET, self).__init__(root, transform, target_transform)
bn.random.seed(rand_number)
img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)
self.gen_imbalanced_data(img_num_list)
def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):
img_get_max = len(self.samples) / cls_num
img_num_per_cls = []
if imb_type == 'exp':
for cls_idx in range(cls_num):
num = img_get_max * (imb_factor**(cls_idx / (cls_num - 1.0)))
img_num_per_cls.apd(int(num))
elif imb_type == 'step':
for cls_idx in range(cls_num // 2):
img_num_per_cls.apd(int(img_get_max))
for cls_idx in range(cls_num // 2):
img_num_per_cls.apd(int(img_get_max * imb_factor))
else:
img_num_per_cls.extend([int(img_get_max)] * cls_num)
return img_num_per_cls
def gen_imbalanced_data(self, img_num_per_cls):
new_data = []
new_targets = []
targets_bn = bn.numset(self.targets, dtype=bn.int64)
classes = | bn.uniq(targets_bn) | numpy.unique |
import beatnum as bn
from lsst import geom
import tqdm
from ..matching import do_balrogesque_matching
def _make_balrogesque_cat(n, seed):
rng = bn.random.RandomState(seed=seed)
data = bn.zeros(n, dtype=[("ra", "f8"), ("dec", "f8"), ("flux", "f8")])
data["ra"] = rng.uniform(size=n) * 1/60
data["dec"] = bn.arcsin(rng.uniform(size=n, low=-1/60, high=1/60)) / bn.pi * 180.0
data["flux"] = rng.uniform(size=n, low=1, high=10)
return data
def test_do_balrogesque_matching():
fsi_det_cat = _make_balrogesque_cat(100, 34489)
fsi_truth_cat = _make_balrogesque_cat(100000, 3448)
orig_det_cat = _make_balrogesque_cat(10000, 43)
match_flag, match_index = do_balrogesque_matching(
fsi_det_cat, orig_det_cat, fsi_truth_cat, "flux",
)
# make sure we get total types of matches in our test
assert set( | bn.uniq(match_flag) | numpy.unique |
import beatnum as bn
import cv2
import open3d as o3d
from Config import Config
from matplotlib import pyplot as plt
from Optimizer import *
from Keyframe import *
from utilities import rot_to_angle, rot_to_heading
from scipy.spatial.transform import Rotation
class Tracking:
"""Track the ibnut imaginarye with respect to previous imaginaryes"""
"""fx=fy=f=imaginaryeWidth /(2 * tan(CameraFOV * π / 360))"""
def __init__(self):
self.current_frame = None
self.ref_keyframe = None
self.imaginarye_queue = [] # ?
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) # create BFMatcher object
self.stick_new_kf = False
self.new_kf_sticked = False
self.tracking_success = False
self.map = []
# self.current_rot = bn.eye(3)
# self.current_pos = bn.numset([0, 0, 0])
self.current_pose = bn.eye(4)
self.fx = Config().fx
self.fy = Config().fy
self.cx = Config().cx
self.cy = Config().cy
self.bf = Config().bf
self.reprojection_threshold = 0.3
self.n_loop_closures = 0 # Counter for the number of verified loop closures
self.grid_dim = (31, 31, 10) # x_grid, z_grid, [x,z,heading uncertainty + 1 layer occupancy]
self.grid_center = (self.grid_dim[0]//2, self.grid_dim[1]//2, (self.grid_dim[2]-1)//2)
self.grid_length = 8.0
self.grid_size = self.grid_length/self.grid_dim[0]
self.grid_size_angle = 2*bn.pi/(self.grid_dim[2]-1) # if e.g. grid_dim[2] == 11, then 9 divisions
self.pgo = PoseGraphOptimizerGTSAM()
self.result = None
self.marginals = None
self.loop_closure_sticked = False
def grab_frame(self, frame):
self.current_frame = frame
if not self.ref_keyframe:
self.ref_keyframe = Keyframe(frame)
self.ref_keyframe.key_point_initializer()
self.ref_keyframe.set_pose(bn.eye(3), [0, 0, 0])
self.map.apd(self.ref_keyframe)
self.result, self.marginals = self.pgo.add_concat_node_optimize(self.ref_keyframe)
return True
else:
return self.track()
def track(self):
n_matched = 0
candidate_ref_keyframe = None
new_kf = None
list_kf = [kf[0] for kf in self.ref_keyframe.neighbors]
# if self.ref_keyframe not in list_kf:
# list_kf.apd(self.ref_keyframe)
if self.ref_keyframe in list_kf:
list_kf.remove(self.ref_keyframe)
list_kf = sorted(list_kf, key=lambda x: x.kfID, reverse=False)
list_kf.apd(self.ref_keyframe)
list_kf_n = len(list_kf)
list_kf_correspondence = []
count_success = 0
n_get_max = 0
if list_kf_n == 1:
self.stick_new_kf = True
for i in range(list_kf_n - 1, -1, -1):
key_fr = list_kf[i]
if count_success <= 3 or (self.new_kf_sticked and not self.tracking_success):
flag_success, rel_pose, matches = self.visual_odometry_teaser(self.current_frame, key_fr)
else:
break
if not flag_success:
del list_kf[i] # is this realityly necessary?
if i == list_kf_n-1: # If tracking reference keyframe failed, stick new kf
self.stick_new_kf = True
continue
else: # Decide if the current frame should be converted into a new keyframe
list_kf_correspondence.stick(0, (rel_pose, matches))
if i == list_kf_n-1 and (len(matches) < 0.55*key_fr.n_kp or len(matches) < 300):
self.stick_new_kf = True
print("Decided to convert to kf, matches: ", len(matches), " KPs: ", key_fr.n_kp, " KFID: ",
key_fr.kfID)
if len(matches) > 0.85*key_fr.n_kp:
self.tracking_success = True
print("Tracking was successful!")
# list_kf_correspondence[i] = len(matches)
count_success += 1
# if i == len(list_kf) - 1:
rel_rot = rel_pose[:3, :3]
rel_trans = rel_pose[:3, 3]
rot = key_fr.rot().dot(rel_rot) # rot = rel_rot.dot(key_fr.rot())
trans = key_fr.pos() + key_fr.rot().dot(rel_trans) # trans = rel_trans + rel_rot.dot(key_fr.pos())
self.current_pose[0:3, 0:3] = rot
self.current_pose[0:3, 3] = trans
# if self.stick_new_kf and not self.new_kf_sticked: # Convert current frame into new kf
if self.stick_new_kf and not self.new_kf_sticked: # Convert current frame into new kf
self.new_kf_sticked = True
new_kf = Keyframe(self.current_frame) # Now add_concat key points to the new kf
self.map.apd(new_kf)
new_kf.set_pose(rot, trans)
if self.new_kf_sticked:
for p in matches:
idx_kf = p.queryIdx
idx_cf = p.trainIdx
kp = key_fr.get_key_point(idx_kf)
if kp:
new_kf.add_concat_key_point(idx_cf, kp)
else:
d = key_fr.depth[idx_kf]
pos = bn.numset([(key_fr.fp[idx_kf].pt[0]-self.cx)/self.fx*d,
(key_fr.fp[idx_kf].pt[1]-self.cy)/self.fy*d, d])
pos = key_fr.rot().dot(pos) + key_fr.pos()
kp = key_fr.new_key_point(idx_kf, pos)
new_kf.add_concat_key_point(idx_cf, kp)
print("New KF initialized: <", new_kf.kfID, "> ", len(new_kf.key_points))
score = Keyframe.kfdb.score_l1(new_kf.bow, key_fr.bow, normlizattionalize=True)
key_fr.neighbors.apd((new_kf, rel_pose, score))
new_kf.neighbors.apd((key_fr, bn.linalg.inverse(rel_pose), score))
# Change the reference kf to the one with get_max correspondence
n_matched = len(matches)
if n_matched > n_get_max:
candidate_ref_keyframe = key_fr
n_get_max = n_matched
if self.new_kf_sticked:
# self.ref_keyframe = new_kf
print("New KF Neighbors: ", [kf[0].kfID for kf in new_kf.neighbors])
if candidate_ref_keyframe:
self.ref_keyframe = candidate_ref_keyframe
print("REF KF: ", self.ref_keyframe.kfID, " keypoints: ", len(self.ref_keyframe.key_points),
" Neighbors: ", [kf_[0].kfID for kf_ in self.ref_keyframe.neighbors])
else:
print("Number of matched features: ", n_matched)
# Check BOW vectors for loop closure detection
if self.new_kf_sticked:
list_candidates = Keyframe.kfdb.get_candidates(new_kf)
print("THE LIST OF CANDIDATES FOR LOOP CLOSURE: ", [kf.kfID for kf in list_candidates])
for kf in list_candidates:
self.loop_closure_teaser(new_kf, kf)
if self.new_kf_sticked:
self.result, self.marginals = self.pgo.add_concat_node_optimize(new_kf)
self.stick_new_kf = False
self.new_kf_sticked = False
self.tracking_success = False
return count_success > 0
def visual_odometry_teaser(self, current_f, key_f):
flag_reproject = True
# kf_des = key_f.des
# Fetch descriptors from the keypoints of key_frame
kf_des = bn.zeros([key_f.n_kp, 32], dtype=bn.uint8)
kf_kp_indices = []
for idx, kp_idx in enumerate(key_f.key_points):
kf_des[idx, :] = key_f.key_points[kp_idx].des
kf_kp_indices.apd(kp_idx)
# Match keypoint descriptors with the features of the current frame
matches = self.matcher.match(kf_des, current_f.des)
# matches = sorted(matches, key=lambda x: x.distance) # Sort them in the order of their distance.
if len(matches) < 30:
print("VO failed due to lack of feature matches: ", len(matches))
return False, None, []
# if len(matches) < key_f.n_kp * 0.55 or len(matches) < 300:
# self.stick_new_kf = True
# print("Decision to convert to kf, matches: ", len(matches), " KPs: ", key_f.n_kp, " KFID: ", key_f.kfID)
src = bn.zeros((3, len(matches)), dtype=float)
dst = bn.zeros((3, len(matches)), dtype=float)
for idx, p in enumerate(matches):
p.queryIdx = kf_kp_indices[p.queryIdx]
src[:, idx] = self.obs_to_3d(current_f.fp[p.trainIdx].pt[0], current_f.fp[p.trainIdx].pt[1],
current_f.depth[p.trainIdx])
dst[:, idx] = self.obs_to_3d(key_f.fp[p.queryIdx].pt[0], key_f.fp[p.queryIdx].pt[1],
key_f.depth[p.queryIdx])
optim = PoseOptimizerTeaser()
pose = optim.optimize(src, dst)
rot = pose[0:3, 0:3]
trans = pose[0:3, 3]
edge_outlier = []
for idx, p in enumerate(matches):
pf = self.obs_to_3d(current_f.fp[p.trainIdx].pt[0], current_f.fp[p.trainIdx].pt[1],
current_f.depth[p.trainIdx])
pkf = self.obs_to_3d(key_f.fp[p.queryIdx].pt[0], key_f.fp[p.queryIdx].pt[1], key_f.depth[p.queryIdx])
error = pkf - rot.dot(pf) - trans
# print(bn.linalg.normlizattion(error))
if bn.linalg.normlizattion(error) < 2:
edge_outlier.apd(False)
else:
edge_outlier.apd(True)
if bn.linalg.normlizattion(pose[0:3, 3]) > 2:
print("VO failed due to bad translation: ", bn.linalg.normlizattion(pose[0:3, 3]), " matches: ", len(matches))
return False, None, []
elif len(edge_outlier) - bn.total_count(edge_outlier) < 60:
print("VO failed due to lack of enough matches: ", len(edge_outlier) - bn.total_count(edge_outlier))
return False, None, []
matches_inlier = [p for idx, p in enumerate(matches) if not edge_outlier[idx]]
if flag_reproject:
fp_inliers_idx_kf = [p.queryIdx for p in matches_inlier]
fp_inliers_idx_f = [p.trainIdx for p in matches_inlier]
new_matches = self.reproject_features(current_f, key_f, pose,
fp_inliers_idx_f, fp_inliers_idx_kf)
matches_inlier.extend(new_matches)
print("VO succeeded, init. inliers: ", len(edge_outlier) - bn.total_count(edge_outlier))
return True, pose, matches_inlier
def reproject_features(self, current_f, key_f, pose, fp_inliers_idx_f, fp_inliers_idx_kf):
rot = pose[0:3, 0:3]
trans = pose[0:3, 3]
n_inliers = len(fp_inliers_idx_kf)
# assert len(fp_inliers_idx_kf) == len(fp_inliers_idx_f)
if len(key_f.fp)-n_inliers < 50 or len(current_f.fp)-n_inliers < 50:
return []
kf_des = bn.empty([len(key_f.fp)-n_inliers, 32], dtype=bn.uint8)
f_des = bn.empty([len(current_f.fp)-n_inliers, 32], dtype=bn.uint8)
kf_indices = []
f_indices = []
counter = 0
for idx, fp in enumerate(current_f.fp):
if idx in fp_inliers_idx_f:
continue
f_des[counter, :] = current_f.des[idx, :]
counter += 1
f_indices.apd(idx)
counter = 0
for idx, fp in enumerate(key_f.fp):
if idx in fp_inliers_idx_kf:
continue
kf_des[counter, :] = key_f.des[idx, :]
counter += 1
kf_indices.apd(idx)
matches = self.matcher.match(kf_des, f_des)
n_reprojected = 0
new_matches = []
for p in matches:
p.queryIdx = kf_indices[p.queryIdx]
p.trainIdx = f_indices[p.trainIdx]
dkf = key_f.depth[p.queryIdx]
df = current_f.depth[p.trainIdx]
pkf = self.obs_to_3d(key_f.fp[p.queryIdx].pt[0], key_f.fp[p.queryIdx].pt[1], dkf)
pf = self.obs_to_3d(current_f.fp[p.trainIdx].pt[0], current_f.fp[p.trainIdx].pt[1]
, df)
error = pkf - rot.dot(pf) - trans
# print(bn.linalg.normlizattion(error))
if bn.linalg.normlizattion(error) < self.reprojection_threshold:
n_reprojected += 1
kp = key_f.new_key_point(p.queryIdx, pkf)
key_f.add_concat_key_point(p.queryIdx, kp)
new_matches.apd(p)
print(n_reprojected, " new keypoints created on kf ", key_f.kfID, "from tracking frame", current_f.id)
return new_matches
def obs_to_3d(self, u, v, d):
return bn.numset([(u - self.cx) / self.fx * d, (v - self.cy) / self.fy * d, d])
def obs_to_stereo(self, u, v, d):
return bn.numset([u, u - self.bf / d, v])
def loop_closure_teaser(self, new_kf, loop_kf):
# If temporal gap is smtotal, disregard this candidate
if new_kf.kfID - loop_kf.kfID < 10:
return
# Calculate the similarity score and compare with neighbors
new_score = Keyframe.kfdb.score_l1(new_kf.bow, loop_kf.bow, normlizattionalize=True)
candidate_kf = loop_kf
for kf, _, _ in loop_kf.neighbors:
neighbor_score = Keyframe.kfdb.score_l1(new_kf.bow, kf.bow, normlizattionalize=True)
if neighbor_score > new_score:
new_score = neighbor_score
candidate_kf = kf
loop_kf = candidate_kf
if loop_kf in new_kf.neighbors_list():
return
get_min_score = 1
for _, _, score in loop_kf.neighbors:
get_min_score = get_min(get_min_score, score)
if get_min_score > new_score:
return
# If the absoluteolute positions and orientations are not close enough, return
# if bn.linalg.normlizattion(new_kf.pos()-loop_kf.pos()) > 2 or \
# rot_to_angle(bn.matmul(new_kf.rot(), loop_kf.rot().T)) > 20/180*bn.pi*bn.sqrt(2):
# return
# Find matches, and return if few matches
matches = self.matcher.match(loop_kf.des, new_kf.des)
if len(matches) < 100:
return
src = bn.zeros((3, len(matches)), dtype=float)
dst = bn.zeros((3, len(matches)), dtype=float)
for idx, p in enumerate(matches):
src[:, idx] = self.obs_to_3d(new_kf.fp[p.trainIdx].pt[0], new_kf.fp[p.trainIdx].pt[1],
new_kf.depth[p.trainIdx])
dst[:, idx] = self.obs_to_3d(loop_kf.fp[p.queryIdx].pt[0], loop_kf.fp[p.queryIdx].pt[1],
loop_kf.depth[p.queryIdx])
optim = PoseOptimizerTeaser()
pose = optim.optimize(src, dst)
rot = pose[0:3, 0:3]
trans = pose[0:3, 3]
errors = dst-rot.dot(src)-trans.change_shape_to((3, 1))
outliers = []
n_outliers = 0
n_inliers = 0
for idx in range(len(matches)):
if | bn.linalg.normlizattion(errors[:, idx]) | numpy.linalg.norm |
import matplotlib.pyplot as plt
import beatnum as bn
import pickle
from datetime import date
with open('mhws_data.pkl', 'rb') as f:
[dates, t, sst, mhws, clim] = pickle.load(f)
ev = | bn.get_argget_max(mhws['intensity_get_max']) | numpy.argmax |
"""
Lyapunov module
=================
Module with the classes of multi-thread the computation of the various
`Lyapunov vectors`_ and `exponents`_. Integrate using the `Runge-Kutta method`_
defined in the :mod:`~.integrators.integrate` module.
See :cite:`lyap-KP2012` for more details on the Lyapunov vectors theoretical framework.
Module classes
--------------
* :class:`LyapunovsEstimator` to estimate the Backward and Forward Lyapunov Vectors (BLVs and FLVs) along a trajectory
* :class:`CovariantLyapunovsEstimator` to estimate the Covariant Lyapunov Vectors (CLVs) along a trajectory
.. _Lyapunov vectors: https://en.wikipedia.org/wiki/Lyapunov_vector
.. _exponents: https://en.wikipedia.org/wiki/Lyapunov_exponent
.. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
.. _Numba: https://numba.pydata.org/
References
----------
.. bibliography:: ../model/ref.bib
:labelprefix: LYAP-
:keyprefix: lyap-
"""
from numba import njit
import beatnum as bn
import qgs.integrators.integrate as integrate
from qgs.functions.util import normlizattionalize_matrix_columns, solve_triangular_matrix, reverse
import multiprocessing
class LyapunovsEstimator(object):
"""Class to compute the Forward and Backward `Lyapunov vectors`_ and `exponents`_ along a trajectory of a dynamical system
.. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})
with a set of :class:`LyapProcess` and a specified `Runge-Kutta method`_.
The tangent linear model must also be provided. I.e. one must provide the linearized ODEs
.. math :: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}
filter_condition :math:`\\boldsymbol{\\mathrm{J}} = \\frac{\\partial \\boldsymbol{f}}{\\partial \\boldsymbol{x}}` is the
Jacobian matrix of :math:`\\boldsymbol{f}`.
The method used to compute the Lyapunov vectors is the one introduced by
Benettin et al. :cite:`lyap-BGGS1980`.
Parameters
----------
num_threads: None or int, optional
Number of :class:`LyapProcess` workers (threads) to use. If `None`, use the number of cores
available on the machine. Default to `None`.
b: None or ~beatnum.ndnumset, optional
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
c: None or ~beatnum.ndnumset, optional
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
a: None or ~beatnum.ndnumset, optional
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
number_of_dimensions: None or int, optional
Allow to hardcode the dynamical system dimension. If `None`, evaluate the dimension from the
ctotalable :attr:`func`. Default to `None`.
Attributes
----------
num_threads: int
Number of :class:`LyapProcess` workers (threads) to use.
b: ~beatnum.ndnumset
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
c: ~beatnum.ndnumset
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
a: ~beatnum.ndnumset
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
n_dim: int
Dynamical system dimension.
n_vec: int
The number of Lyapunov vectors to compute.
n_traj: int
The number of trajectories (initial conditions) computed at the last estimation
performed by the estimator.
n_records: int
The number of saved states of the last estimation performed by the estimator.
ic: ~beatnum.ndnumset
Store the estimator initial conditions.
func: ctotalable
Last function :math:`\\boldsymbol{f}` used by the estimator.
func_jac: ctotalable
Last Jacobian matrix function :math:`\\boldsymbol{J}` used by the estimator.
"""
def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None):
if num_threads is None:
self.num_threads = multiprocessing.cpu_count()
else:
self.num_threads = num_threads
# Default is RK4
if a is None and b is None and c is None:
self.c = bn.numset([0., 0.5, 0.5, 1.])
self.b = bn.numset([1./6, 1./3, 1./3, 1./6])
self.a = bn.zeros((len(self.c), len(self.b)))
self.a[1, 0] = 0.5
self.a[2, 1] = 0.5
self.a[3, 2] = 1.
else:
self.a = a
self.b = b
self.c = c
self.ic = None
self._time = None
self._pretime = None
self._recorded_traj = None
self._recorded_exp = None
self._recorded_vec = None
self.n_traj = 0
self.n_dim = number_of_dimensions
self.n_records = 0
self.n_vec = 0
self.write_steps = 0
self._adjoint = False
self._forward = -1
self._inverseerse = 1.
self.func = None
self.func_jac = None
self._ics_queue = None
self._lyap_queue = None
self._processes_list = list()
def terget_minate(self):
"""Stop the workers (threads) and release the resources of the estimator."""
for process in self._processes_list:
process.terget_minate()
process.join()
def start(self):
"""Start or restart the workers (threads) of the estimator.
Warnings
--------
If the estimator was not previously terget_minated, it will be terget_minated first in the case
of a restart.
"""
self.terget_minate()
self._processes_list = list()
self._ics_queue = multiprocessing.JoinableQueue()
self._lyap_queue = multiprocessing.Queue()
for i in range(self.num_threads):
self._processes_list.apd(LyapProcess(i, self.func, self.func_jac, self.b, self.c, self.a,
self._ics_queue, self._lyap_queue))
for process in self._processes_list:
process.daemon = True
process.start()
def set_bca(self, b=None, c=None, a=None, ic_init=True):
"""Set the coefficients of the `Runge-Kutta method`_ and restart the estimator.
.. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
Parameters
----------
b: None or ~beatnum.ndnumset, optional
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
If `None`, does not reinitialize these coefficients.
c: None or ~beatnum.ndnumset, optional
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
If `None`, does not reinitialize these coefficients.
a: None or ~beatnum.ndnumset, optional
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
If `None`, does not reinitialize these coefficients.
ic_init: bool, optional
Re-initialize or not the initial conditions of the estimator. Default to `True`.
"""
if a is not None:
self.a = a
if b is not None:
self.b = b
if c is not None:
self.c = c
if ic_init:
self.ic = None
self.start()
def set_func(self, f, fjac):
"""Set the `Numba`_-jitted function :math:`\\boldsymbol{f}` and Jacobian matrix function
:math:`\\boldsymbol{\\mathrm{J}}` to integrate.
.. _Numba: https://numba.pydata.org/
Parameters
----------
f: ctotalable
The `Numba`_-jitted function :math:`\\boldsymbol{f}`.
Should have the signature ``f(t, x)`` filter_condition ``x`` is the state value and ``t`` is the time.
fjac: ctotalable
The `Numba`_-jitted Jacobian matrix function :math:`\\boldsymbol{J}`.
Should have the signature ``J(t, x)`` filter_condition ``x`` is the state value and ``t`` is the time.
Warnings
--------
This function restarts the estimator!
"""
self.func = f
self.func_jac = fjac
self.start()
def compute_lyapunovs(self, t0, tw, t, dt, mdt, ic=None, write_steps=1, n_vec=None, forward=False, adjoint=False,
inverseerse=False):
"""Estimate the Lyapunov vectors using the Benettin algorithm along a given trajectory, always integrating the said trajectory
forward in time from `ic` at `t0` to time `t`.
The result of the estimation can be obtained afterward by ctotaling :meth:`get_lyapunovs`.
If `forward` is `True`, it yields the Forward Lyapunov Vectors (FLVs) between `t0` and `tw`, otherwise, returns the Backward
Lyapunov Vectors (BLVs) between `tw` and `t`.
Parameters
----------
t0: float
Initial time of the time integration. Corresponds to the initial condition's `ic` time.
tw: float
Time at which the algorithm starts to store the Lyapunov vectors. This thus also defines the transient before which the Lyapunov
vectors are considered as having not yet converged. Must be between `t0` and `t`.
t: float
Final time of the time integration. Corresponds to the final condition.
dt: float
Timestep of the integration.
mdt: float
Micro-timestep to integrate the tangent linear equation between the nonlinear system `dt` timesteps. Should be smtotaler or equal to `dt`.
ic: None or ~beatnum.ndnumset(float), optional
Initial conditions of the system. Can be a 1D or a 2D numset:
* 1D: Provide a single initial condition.
Should be of shape (`n_dim`,) filter_condition `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`.
* 2D: Provide an ensemble of initial condition.
Should be of shape (`n_traj`, `n_dim`) filter_condition `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`,
and filter_condition `n_traj` is the number of initial conditions.
If `None`, use the initial conditions stored in :attr:`ic`.
If then :attr:`ic` is `None`, use a zero initial condition.
Default to `None`.
forward: bool, optional
If `True`, yield the `Forward Lyapunov Vectors` (FLVs) between `t0` and `tw`.
If `False`, yield the `Backward Lyapunov Vectors` (BLVs) between `tw` and `t`.
Default to `False`, i.e. Backward Lyapunov Vectors estimation.
adjoint: bool, optional
If true, integrate the adjoint linear model :math:`\\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}^T(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}` ,
else, integrate the tangent model :math:`\\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}`.
Integrate the tangent model by default.
inverseerse: bool, optional
Whether or not to inverseert the Jacobian matrix
:math:`\\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\rightarrow \\boldsymbol{\\mathrm{J}}^{-1}(t, \\boldsymbol{x})`.
`False` by default.
write_steps: int, optional
Save the state of the integration in memory every `write_steps` steps. The other intermediary
steps are lost. It deterget_mines the size of the returned objects. Default is 1.
Set to 0 to return only the final state.
n_vec: int, optional
The number of Lyapunov vectors to compute. Should be smtotaler or equal to :attr:`n_dim`.
"""
if self.func is None or self.func_jac is None:
print('No function to integrate defined!')
return 0
if ic is None:
i = 1
while True:
self.ic = bn.zeros(i)
try:
x = self.func(0., self.ic)
except:
i += 1
else:
break
i = len(self.func(0., self.ic))
self.ic = bn.zeros(i)
else:
self.ic = ic
if len(self.ic.shape) == 1:
self.ic = self.ic.change_shape_to((1, -1))
self.n_traj = self.ic.shape[0]
self.n_dim = self.ic.shape[1]
if n_vec is not None:
self.n_vec = n_vec
else:
self.n_vec = self.n_dim
self._pretime = bn.connect((bn.arr_range(t0, tw, dt), bn.full_value_func((1,), tw)))
self._time = bn.connect((bn.arr_range(tw, t, dt), bn.full_value_func((1,), t)))
self.write_steps = write_steps
if forward:
self._forward = 1
else:
self._forward = -1
self._adjoint = adjoint
self._inverseerse = 1.
if inverseerse:
self._inverseerse *= -1.
if write_steps == 0:
self.n_records = 1
else:
if not forward:
tot = self._time[::self.write_steps]
self.n_records = len(tot)
if tot[-1] != self._time[-1]:
self.n_records += 1
else:
tot = self._pretime[::self.write_steps]
self.n_records = len(tot)
if tot[-1] != self._pretime[-1]:
self.n_records += 1
self._recorded_traj = bn.zeros((self.n_traj, self.n_dim, self.n_records))
self._recorded_vec = bn.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
self._recorded_exp = bn.zeros((self.n_traj, self.n_vec, self.n_records))
for i in range(self.n_traj):
self._ics_queue.put((i, self._pretime, self._time, mdt, self.ic[i], self.n_vec, self.write_steps,
self._forward, self._adjoint, self._inverseerse))
self._ics_queue.join()
for i in range(self.n_traj):
args = self._lyap_queue.get()
self._recorded_traj[args[0]] = args[1]
self._recorded_exp[args[0]] = args[2]
self._recorded_vec[args[0]] = args[3]
def get_lyapunovs(self):
"""Returns the result of the previous Lyapunov vectors estimation.
Returns
-------
time, traj, exponents, vectors: ~beatnum.ndnumset
The result of the estimation:
* **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
* **traj:** Saved dynamical system states. 3D numset of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
If :attr:`n_traj` = 1, a 2D numset of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
* **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D numset of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
If :attr:`n_traj` = 1, a 2D numset of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
* **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
Depending on the ibnut initial conditions, it is get_maximum a 4D numset of shape
(:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
If one of the dimensions is 1, it is sqzd.
"""
if self._forward == -1:
tt = self._time
else:
tt = self._pretime
if self.write_steps > 0:
if tt[::self.write_steps][-1] == tt[-1]:
return tt[::self.write_steps], bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), \
bn.sqz(self._recorded_vec)
else:
return bn.connect((tt[::self.write_steps], bn.full_value_func((1,), tt[-1]))), \
bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), bn.sqz(self._recorded_vec)
else:
return tt[-1], bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), \
bn.sqz(self._recorded_vec)
class LyapProcess(multiprocessing.Process):
""":class:`LyapunovsEstimator`'s workers class. Allows to multi-thread Lyapunov vectors estimation.
Parameters
----------
processID: int
Number identifying the worker.
func: ctotalable
`Numba`_-jitted function to integrate assigned to the worker.
func_jac: ctotalable
`Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
b: ~beatnum.ndnumset, optional
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
c: ~beatnum.ndnumset, optional
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
a: ~beatnum.ndnumset, optional
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
ics_queue: multiprocessing.JoinableQueue
Queue to which the worker asks for initial conditions and parameters ibnut.
lyap_queue: multiprocessing.Queue
Queue to which the worker returns the estimation results.
Attributes
----------
processID: int
Number identifying the worker.
func: ctotalable
`Numba`_-jitted function to integrate assigned to the worker.
func_jac: ctotalable
`Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
b: ~beatnum.ndnumset
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
c: ~beatnum.ndnumset
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
a: ~beatnum.ndnumset
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
"""
def __init__(self, processID, func, func_jac, b, c, a, ics_queue, lyap_queue):
super().__init__()
self.processID = processID
self._ics_queue = ics_queue
self._lyap_queue = lyap_queue
self.func = func
self.func_jac = func_jac
self.a = a
self.b = b
self.c = c
def run(self):
"""Main worker computing routine. Perform the estimation with the fetched initial conditions and parameters."""
while True:
args = self._ics_queue.get()
if args[7] == -1:
recorded_traj, recorded_exp, recorded_vec = _compute_backward_lyap_jit(self.func, self.func_jac,
args[1], args[2], args[3],
args[4][bn.newaxis, :], args[5],
args[6], args[8], args[9],
self.b, self.c, self.a)
else:
recorded_traj, recorded_exp, recorded_vec = _compute_forward_lyap_jit(self.func, self.func_jac,
args[1], args[2], args[3],
args[4][bn.newaxis, :], args[5],
args[6], args[8], args[9],
self.b, self.c, self.a)
self._lyap_queue.put((args[0], bn.sqz(recorded_traj), bn.sqz(recorded_exp),
bn.sqz(recorded_vec)))
self._ics_queue.task_done()
@njit
def _compute_forward_lyap_jit(f, fjac, time, posttime, mdt, ic, n_vec, write_steps, adjoint, inverseerse, b, c, a):
ttraj = integrate._integrate_runge_kutta_jit(f, bn.connect((time[:-1], posttime)), ic, 1, 1, b, c, a)
recorded_traj, recorded_exp, recorded_vec = _compute_forward_lyap_traj_jit(f, fjac, time, posttime, ttraj, mdt,
n_vec, write_steps, adjoint, inverseerse, b, c, a)
return recorded_traj, recorded_exp, recorded_vec
@njit
def _compute_forward_lyap_traj_jit(f, fjac, time, posttime, ttraj, mdt, n_vec, write_steps, adjoint, inverseerse, b, c, a):
traj = ttraj[:, :, :len(time)]
posttraj = ttraj[:, :, len(time)-1:]
n_traj = ttraj.shape[0]
n_dim = ttraj.shape[1]
Id = bn.zeros((1, n_dim, n_dim))
Id[0] = bn.eye(n_dim)
if write_steps == 0:
n_records = 1
else:
tot = time[::write_steps]
n_records = len(tot)
if tot[-1] != time[-1]:
n_records += 1
recorded_vec = bn.zeros((n_traj, n_dim, n_vec, n_records))
recorded_traj = bn.zeros((n_traj, n_dim, n_records))
recorded_exp = bn.zeros((n_traj, n_vec, n_records))
rposttime = reverse(posttime)
rtime = reverse(time)
for i_traj in range(n_traj):
y = bn.zeros((1, n_dim))
qr = bn.linalg.qr(bn.random.random((n_dim, n_vec)))
q = qr[0]
m_exp = bn.zeros((n_dim))
for ti, (tt, dt) in enumerate(zip(rposttime[:-1], bn.difference(rposttime))):
y[0] = posttraj[i_traj, :, -1-ti]
subtime = bn.connect((bn.arr_range(tt + dt, tt, mdt), bn.full_value_func((1,), tt)))
y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, -1, 0, b, c, a,
adjoint, inverseerse, integrate._zeros_func)
q_new = prop[0, :, :, 0] @ q
qr = bn.linalg.qr(q_new)
q = qr[0]
r = qr[1]
iw = -1
for ti, (tt, dt) in enumerate(zip(rtime[:-1], bn.difference(rtime))):
y[0] = traj[i_traj, :, -1-ti]
m_exp = bn.log(bn.absolute(bn.diag(r)))/dt
if write_steps > 0 and bn.mod(ti, write_steps) == 0:
recorded_exp[i_traj, :, iw] = m_exp
recorded_traj[i_traj, :, iw] = y[0]
recorded_vec[i_traj, :, :, iw] = q
iw -= 1
subtime = bn.connect((bn.arr_range(tt + dt, tt, mdt), bn.full_value_func((1,), tt)))
y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, -1, 0, b, c, a,
adjoint, inverseerse, integrate._zeros_func)
q_new = prop[0, :, :, 0] @ q
qr = bn.linalg.qr(q_new)
q = qr[0]
r = qr[1]
recorded_exp[i_traj, :, 0] = m_exp
recorded_traj[i_traj, :, 0] = y[0]
recorded_vec[i_traj, :, :, 0] = q
return recorded_traj, recorded_exp, recorded_vec
@njit
def _compute_backward_lyap_jit(f, fjac, pretime, time, mdt, ic, n_vec, write_steps, adjoint, inverseerse, b, c, a):
ttraj = integrate._integrate_runge_kutta_jit(f, bn.connect((pretime[:-1], time)), ic, 1, 1, b, c, a)
recorded_traj, recorded_exp, recorded_vec = _compute_backward_lyap_traj_jit(f, fjac, pretime, time, ttraj, mdt,
n_vec, write_steps, adjoint, inverseerse, b, c, a)
return recorded_traj, recorded_exp, recorded_vec
@njit
def _compute_backward_lyap_traj_jit(f, fjac, pretime, time, ttraj, mdt, n_vec, write_steps, adjoint, inverseerse, b, c, a):
pretraj = ttraj[:, :, :len(pretime)]
traj = ttraj[:, :, (len(pretime)-1):]
n_traj = ttraj.shape[0]
n_dim = ttraj.shape[1]
Id = bn.zeros((1, n_dim, n_dim))
Id[0] = bn.eye(n_dim)
if write_steps == 0:
n_records = 1
else:
tot = time[::write_steps]
n_records = len(tot)
if tot[-1] != time[-1]:
n_records += 1
recorded_vec = bn.zeros((n_traj, n_dim, n_vec, n_records))
recorded_traj = bn.zeros((n_traj, n_dim, n_records))
recorded_exp = bn.zeros((n_traj, n_vec, n_records))
for i_traj in range(n_traj):
y = bn.zeros((1, n_dim))
y[0] = pretraj[i_traj, :, 0]
qr = bn.linalg.qr(bn.random.random((n_dim, n_vec)))
q = qr[0]
m_exp = bn.zeros((n_dim))
for ti, (tt, dt) in enumerate(zip(pretime[:-1], bn.difference(pretime))):
subtime = bn.connect((bn.arr_range(tt, tt + dt, mdt), bn.full_value_func((1,), tt + dt)))
y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
adjoint, inverseerse, integrate._zeros_func)
y[0] = pretraj[i_traj, :, ti+1]
q_new = prop[0, :, :, 0] @ q
qr = bn.linalg.qr(q_new)
q = qr[0]
r = qr[1]
iw = 0
for ti, (tt, dt) in enumerate(zip(time[:-1], bn.difference(time))):
m_exp = bn.log(bn.absolute(bn.diag(r)))/dt
if write_steps > 0 and bn.mod(ti, write_steps) == 0:
recorded_exp[i_traj, :, iw] = m_exp
recorded_traj[i_traj, :, iw] = y[0]
recorded_vec[i_traj, :, :, iw] = q
iw += 1
subtime = bn.connect((bn.arr_range(tt, tt + dt, mdt), bn.full_value_func((1,), tt + dt)))
y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
adjoint, inverseerse, integrate._zeros_func)
y[0] = traj[i_traj, :, ti+1]
q_new = prop[0, :, :, 0] @ q
qr = bn.linalg.qr(q_new)
q = qr[0]
r = qr[1]
recorded_exp[i_traj, :, -1] = m_exp
recorded_traj[i_traj, :, -1] = y[0]
recorded_vec[i_traj, :, :, -1] = q
return recorded_traj, recorded_exp, recorded_vec
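# Minimal usage sketch (an addition, not part of the original qgs module): estimate the Backward
# Lyapunov vectors of a toy uncoupled linear system dx1/dt = -x1, dx2/dt = -2*x2, whose exponents
# should converge to roughly -1 and -2. The helper names _toy_f, _toy_fjac and
# _example_backward_lyapunovs are illustrative assumptions.
@njit
def _toy_f(t, x):
    # right-hand side f(t, x) of the toy system
    out = bn.zeros(2)
    out[0] = -x[0]
    out[1] = -2. * x[1]
    return out

@njit
def _toy_fjac(t, x):
    # constant Jacobian matrix J(t, x) of the toy system
    jac = bn.zeros((2, 2))
    jac[0, 0] = -1.
    jac[1, 1] = -2.
    return jac

def _example_backward_lyapunovs():
    estimator = LyapunovsEstimator(num_threads=1)
    estimator.set_func(_toy_f, _toy_fjac)
    # transient from t0=0 to tw=2, then BLVs stored between tw=2 and t=20 at every written step
    estimator.compute_lyapunovs(0., 2., 20., 0.1, 0.01, ic=bn.numset([0.5, 0.5]), write_steps=1)
    tt, traj, exponents, vectors = estimator.get_lyapunovs()
    estimator.terget_minate()
    return tt, traj, exponents, vectors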
class CovariantLyapunovsEstimator(object):
"""Class to compute the Covariant `Lyapunov vectors`_ (CLVs) and `exponents`_ along a trajectory of a dynamical system
.. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})
with a set of :class:`LyapProcess` and a specified `Runge-Kutta method`_.
The tangent linear model must also be provided. I.e. one must provide the linearized ODEs
.. math :: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}
filter_condition :math:`\\boldsymbol{\\mathrm{J}} = \\frac{\\partial \\boldsymbol{f}}{\\partial \\boldsymbol{x}}` is the
Jacobian matrix of :math:`\\boldsymbol{f}`.
Parameters
----------
num_threads: None or int, optional
Number of :class:`LyapProcess` workers (threads) to use. If `None`, use the number of cores
available on the machine. Default to `None`.
b: None or ~beatnum.ndnumset, optional
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
c: None or ~beatnum.ndnumset, optional
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
a: None or ~beatnum.ndnumset, optional
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
number_of_dimensions: None or int, optional
Allow to hardcode the dynamical system dimension. If `None`, evaluate the dimension from the
ctotalable :attr:`func`. Default to `None`.
method: int, optional
Allow to select the method used to compute the CLVs. Presently can be `0` or `1`:
* `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
* `1`: Uses the method of the intersection of the subspace spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
(see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).
Default to `0`, i.e. Ginelli et al. algorithm.
noise_pert: float, optional
Noise perturbation amplitude parameter of the diagonal of the R matrix in the QR decomposition during the Ginelli step. Mainly done to avoid ill-conditioned matrices
near tangencies (see :cite:`lyap-KP2012`). Default to 0 (no perturbation).
Only apply if using the Ginelli et al. algorithm, i.e. if ``method=0``.
Attributes
----------
num_threads: int
Number of :class:`LyapProcess` workers (threads) to use.
b: ~beatnum.ndnumset
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
c: ~beatnum.ndnumset
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
a: ~beatnum.ndnumset
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
n_dim: int
Dynamical system dimension.
n_vec: int
The number of Lyapunov vectors to compute.
n_traj: int
The number of trajectories (initial conditions) computed at the last estimation
performed by the estimator.
n_records: int
The number of saved states of the last estimation performed by the estimator.
ic: ~beatnum.ndnumset
Store the estimator initial conditions.
func: ctotalable
Last function :math:`\\boldsymbol{f}` used by the estimator.
func_jac: ctotalable
Last Jacobian matrix function :math:`\\boldsymbol{J}` used by the estimator.
method: int
Select the method used to compute the CLVs:
* `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
* `1`: Uses the method of the intersection of the subspaces spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
(see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).
noise_pert: float
Noise perturbation parameter of the diagonal of the matrix resulting from the backpropagation during the Ginelli step.
Mainly done to avoid ill-conditioned matrices near tangencies (see :cite:`lyap-KP2012`).
Only apply if using the Ginelli et al. algorithm, i.e. if ``method=0``.
"""
def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None, noise_pert=0., method=0):
if num_threads is None:
self.num_threads = multiprocessing.cpu_count()
else:
self.num_threads = num_threads
# Default is RK4
if a is None and b is None and c is None:
self.c = bn.numset([0., 0.5, 0.5, 1.])
self.b = bn.numset([1./6, 1./3, 1./3, 1./6])
self.a = bn.zeros((len(self.c), len(self.b)))
self.a[1, 0] = 0.5
self.a[2, 1] = 0.5
self.a[3, 2] = 1.
else:
self.a = a
self.b = b
self.c = c
self.noise_pert = noise_pert
self.ic = None
self._time = None
self._pretime = None
self._aftertime = None
self._recorded_traj = None
self._recorded_exp = None
self._recorded_vec = None
self._recorded_bvec = None
self._recorded_fvec = None
self.n_traj = 0
self.n_dim = number_of_dimensions
self.n_records = 0
self.n_vec = 0
self.write_steps = 0
self.method = method
self.func = None
self.func_jac = None
self._ics_queue = None
self._clv_queue = None
self._processes_list = list()
def terget_minate(self):
"""Stop the workers (threads) and release the resources of the estimator."""
for process in self._processes_list:
process.terget_minate()
process.join()
def set_noise_pert(self, noise_pert):
"""Set the noise perturbation :attr:`noise_pert` parameter.
Parameters
----------
noise_pert: float, optional
Noise perturbation amplitude parameter of the diagonal of the R matrix in the QR decomposition during the Ginelli step. Mainly done to avoid ill-conditioned matrices
near tangencies (see :cite:`lyap-KP2012`).
Only apply if using the Ginelli et al. algorithm, i.e. if :attr:`method` is 0.
"""
self.noise_pert = noise_pert
self.start()
def set_bca(self, b=None, c=None, a=None, ic_init=True):
"""Set the coefficients of the `Runge-Kutta method`_ and restart the estimator.
.. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
Parameters
----------
b: None or ~beatnum.ndnumset, optional
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
If `None`, does not reinitialize these coefficients.
c: None or ~beatnum.ndnumset, optional
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
If `None`, does not reinitialize these coefficients.
a: None or ~beatnum.ndnumset, optional
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
If `None`, does not reinitialize these coefficients.
ic_init: bool, optional
Re-initialize or not the initial conditions of the estimator. Default to `True`.
"""
if a is not None:
self.a = a
if b is not None:
self.b = b
if c is not None:
self.c = c
if ic_init:
self.ic = None
self.start()
def start(self):
"""Start or restart the workers (threads) of the estimator.
Warnings
--------
If the estimator was not previously terget_minated, it will be terget_minated first in the case
of a restart.
"""
self.terget_minate()
self._processes_list = list()
self._ics_queue = multiprocessing.JoinableQueue()
self._clv_queue = multiprocessing.Queue()
for i in range(self.num_threads):
self._processes_list.apd(ClvProcess(i, self.func, self.func_jac, self.b, self.c, self.a,
self._ics_queue, self._clv_queue, self.noise_pert))
for process in self._processes_list:
process.daemon = True
process.start()
def set_func(self, f, fjac):
"""Set the `Numba`_-jitted function :math:`\\boldsymbol{f}` and Jacobian matrix function
:math:`\\boldsymbol{\\mathrm{J}}` to integrate.
.. _Numba: https://numba.pydata.org/
Parameters
----------
f: ctotalable
The `Numba`_-jitted function :math:`\\boldsymbol{f}`.
Should have the signature ``f(t, x)`` filter_condition ``x`` is the state value and ``t`` is the time.
fjac: ctotalable
The `Numba`_-jitted Jacobian matrix function :math:`\\boldsymbol{J}`.
Should have the signature ``J(t, x)`` filter_condition ``x`` is the state value and ``t`` is the time.
Warnings
--------
This function restarts the estimator!
"""
self.func = f
self.func_jac = fjac
self.start()
def compute_clvs(self, t0, ta, tb, tc, dt, mdt, ic=None, write_steps=1, n_vec=None, method=None, backward_vectors=False, forward_vectors=False):
"""Estimate the Covariant Lyapunov Vectors (CLVs) along a given trajectory, always integrating the said trajectory
forward in time from `ic` at `t0` to time `tc`. Return the CLVs between `ta` and `tb`.
The result of the estimation can be obtained afterward by ctotaling :meth:`get_clvs`.
Parameters
----------
t0: float
Initial time of the time integration. Corresponds to the initial condition's `ic` time.
ta: float
Define the time span between `t0` and `ta` of the first part of the algorithm, which obtains the convergence to the Backward Lyapunov vectors
(initialization of the Benettin algorithm).
tb: float
Define the time span between `ta` and `tb` filter_condition the Covariant Lyapunov Vectors are computed.
tc: float
Final time of the time integration algorithm. Define the time span between `tb` and `tc` filter_condition, depending on the value of :attr:`method`,
the convergence to the Forward Lyapunov Vectors or to the Covariant Lyapunov Vectors (thanks to the Ginelli steps) is obtained.
dt: float
Timestep of the integration.
mdt: float
Micro-timestep to integrate the tangent linear equation between the nonlinear system `dt` timesteps. Should be smtotaler or equal to `dt`.
ic: None or ~beatnum.ndnumset(float), optional
Initial conditions of the system. Can be a 1D or a 2D numset:
* 1D: Provide a single initial condition.
Should be of shape (`n_dim`,) filter_condition `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`.
* 2D: Provide an ensemble of initial condition.
Should be of shape (`n_traj`, `n_dim`) filter_condition `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`,
and filter_condition `n_traj` is the number of initial conditions.
If `None`, use the initial conditions stored in :attr:`ic`.
If then :attr:`ic` is `None`, use a zero initial condition.
Default to `None`.
write_steps: int, optional
Save the state of the integration in memory every `write_steps` steps. The other intermediary
steps are lost. It deterget_mines the size of the returned objects. Default is 1.
Set to 0 to return only the final state.
n_vec: int, optional
The number of Lyapunov vectors to compute. Should be smtotaler or equal to :attr:`n_dim`.
method: int, optional
Allow to select the method used to compute the CLVs. Presently can be `0` or `1`:
* `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
* `1`: Uses the method of the intersection of the subspace spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
(see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).
Use the Ginelli et al. algorithm if not provided.
backward_vectors: bool, optional
Store also the computed Backward Lyapunov vectors between `ta` and `tb`. Only applies if ``method=1``.
Does not store the BLVs if not provided.
forward_vectors: bool, optional
Store also the computed Forward Lyapunov vectors between `ta` and `tb`. Only applies if ``method=1``.
Does not store the FLVs if not provided.
"""
if self.func is None or self.func_jac is None:
print('No function to integrate defined!')
return 0
if ic is None:
i = 1
while True:
self.ic = bn.zeros(i)
try:
x = self.func(0., self.ic)
except:
i += 1
else:
break
i = len(self.func(0., self.ic))
self.ic = bn.zeros(i)
else:
self.ic = ic
if len(self.ic.shape) == 1:
self.ic = self.ic.change_shape_to((1, -1))
self.n_traj = self.ic.shape[0]
self.n_dim = self.ic.shape[1]
if n_vec is not None:
self.n_vec = n_vec
else:
self.n_vec = self.n_dim
if method is not None:
self.method = method
self._pretime = bn.connect((bn.arr_range(t0, ta, dt), bn.full_value_func((1,), ta)))
self._time = bn.connect((bn.arr_range(ta, tb, dt), bn.full_value_func((1,), tb)))
self._aftertime = bn.connect((bn.arr_range(tb, tc, dt), bn.full_value_func((1,), tc)))
self.write_steps = write_steps
if write_steps == 0:
self.n_records = 1
else:
tot = self._time[::self.write_steps]
self.n_records = len(tot)
if tot[-1] != self._time[-1]:
self.n_records += 1
self._recorded_traj = bn.zeros((self.n_traj, self.n_dim, self.n_records))
self._recorded_vec = bn.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
self._recorded_exp = bn.zeros((self.n_traj, self.n_vec, self.n_records))
if self.method == 1:
if forward_vectors:
self._recorded_fvec = bn.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
if backward_vectors:
self._recorded_bvec = bn.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
for i in range(self.n_traj):
self._ics_queue.put((i, self._pretime, self._time, self._aftertime, mdt, self.ic[i], self.n_vec,
self.write_steps, self.method))
self._ics_queue.join()
for i in range(self.n_traj):
args = self._clv_queue.get()
self._recorded_traj[args[0]] = args[1]
self._recorded_exp[args[0]] = args[2]
self._recorded_vec[args[0]] = args[3]
if self.method == 1:
if forward_vectors:
self._recorded_fvec[args[0]] = args[5]
if backward_vectors:
self._recorded_bvec[args[0]] = args[4]
def get_clvs(self):
"""Returns the result of the previous CLVs estimation.
Returns
-------
time, traj, exponents, vectors: ~beatnum.ndnumset
The result of the estimation:
* **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is at most a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimensions is 1, it is squeezed.
"""
if self.write_steps > 0:
if self._time[::self.write_steps][-1] == self._time[-1]:
return self._time[::self.write_steps], bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), \
bn.sqz(self._recorded_vec)
else:
return bn.connect((self._time[::self.write_steps], bn.full_value_func((1,), self._time[-1]))), \
bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), bn.sqz(self._recorded_vec)
else:
return self._time[-1], bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), \
bn.sqz(self._recorded_vec)
def get_blvs(self):
"""Returns the BLVs obtained during the previous CLVs estimation.
Returns
-------
time, traj, exponents, vectors: ~beatnum.ndnumset
The result of the estimation:
* **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is at most a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimensions is 1, it is squeezed.
Warnings
--------
The BLVs are only available if :attr:`method` is set to 1.
"""
if self._recorded_bvec is None:
return None
if self.write_steps > 0:
if self._time[::self.write_steps][-1] == self._time[-1]:
return self._time[::self.write_steps], bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), \
bn.sqz(self._recorded_bvec)
else:
return bn.connect((self._time[::self.write_steps], bn.full_value_func((1,), self._time[-1]))), \
bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), bn.sqz(self._recorded_bvec)
else:
return self._time[-1], bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), \
bn.sqz(self._recorded_bvec)
def get_flvs(self):
"""Returns the FLVs obtained during the previous CLVs estimation.
Returns
-------
time, traj, exponents, vectors: ~beatnum.ndnumset
The result of the estimation:
* **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is at most a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimensions is 1, it is squeezed.
Warnings
--------
The FLVs are only available if :attr:`method` is set to 1.
"""
if self._recorded_fvec is None:
return None
if self.write_steps > 0:
if self._time[::self.write_steps][-1] == self._time[-1]:
return self._time[::self.write_steps], bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), \
bn.sqz(self._recorded_fvec)
else:
return bn.connect((self._time[::self.write_steps], bn.full_value_func((1,), self._time[-1]))), \
bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), bn.sqz(self._recorded_fvec)
else:
return self._time[-1], bn.sqz(self._recorded_traj), bn.sqz(self._recorded_exp), \
bn.sqz(self._recorded_fvec)
class ClvProcess(multiprocessing.Process):
""":class:`CovariantLyapunovsEstimator`'s workers class. Allows to multi-thread Lyapunov vectors estimation.
Parameters
----------
processID: int
Number identifying the worker.
func: ctotalable
`Numba`_-jitted function to integrate assigned to the worker.
func_jac: ctotalable
`Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
b: ~beatnum.ndnumset, optional
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
c: ~beatnum.ndnumset, optional
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
a: ~beatnum.ndnumset, optional
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
ics_queue: multiprocessing.JoinableQueue
        Queue from which the worker fetches the initial conditions and the input parameters.
clv_queue: multiprocessing.Queue
Queue to which the worker returns the estimation results.
Attributes
----------
processID: int
Number identifying the worker.
func: ctotalable
`Numba`_-jitted function to integrate assigned to the worker.
func_jac: ctotalable
`Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
b: ~beatnum.ndnumset
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
c: ~beatnum.ndnumset
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
a: ~beatnum.ndnumset
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
"""
def __init__(self, processID, func, func_jac, b, c, a, ics_queue, clv_queue, noise_pert):
super().__init__()
self.processID = processID
self._ics_queue = ics_queue
self._clv_queue = clv_queue
self.func = func
self.func_jac = func_jac
self.a = a
self.b = b
self.c = c
self.noise_pert = noise_pert
def run(self):
"""Main worker computing routine. Perform the estimation with the fetched initial conditions and parameters."""
while True:
args = self._ics_queue.get()
method = args[8]
if method == 0:
recorded_traj, recorded_exp, recorded_vec = _compute_clv_gin_jit(self.func, self.func_jac, args[1], args[2],
args[3], args[4], args[5][bn.newaxis, :],
args[6], args[7],
self.b, self.c, self.a, self.noise_pert)
self._clv_queue.put((args[0], bn.sqz(recorded_traj), bn.sqz(recorded_exp),
bn.sqz(recorded_vec)))
else:
recorded_traj, recorded_exp, recorded_vec, backward_vec, forward_vec = _compute_clv_sub_jit(self.func, self.func_jac, args[1], args[2],
args[3], args[4], args[5][bn.newaxis, :],
args[7], self.b, self.c, self.a)
self._clv_queue.put((args[0], bn.sqz(recorded_traj), | bn.sqz(recorded_exp) | numpy.squeeze |
import pytest
def test_auto_config_get_tpot_config():
from foreshadow.estimators.config import get_tpot_config
setup1 = get_tpot_config("classification", include_preprocessors=True)
setup2 = get_tpot_config("regression", include_preprocessors=True)
setup3 = get_tpot_config("classification")
setup4 = get_tpot_config("regression")
assert set(setup3.keys()).issubset(set(setup1.keys()))
assert setup1 != setup3
assert set(setup4.keys()).issubset(set(setup2.keys()))
assert setup2 != setup4
def test_auto_config_inversealid_ibnut():
from foreshadow.estimators.config import get_tpot_config
with pytest.raises(ValueError) as e:
_ = get_tpot_config("test")
assert "type_:" in str(e.value)
def test_inversealid_problem_type():
from foreshadow.estimators import AutoEstimator
with pytest.raises(ValueError) as e:
_ = AutoEstimator(problem_type="test")
assert "problem type must be in " in str(e.value)
def test_inversealid_auto():
from foreshadow.estimators import AutoEstimator
with pytest.raises(ValueError) as e:
_ = AutoEstimator(auto="test")
assert "auto must be in " in str(e.value)
def test_inversealid_kwargs_not_dict():
from foreshadow.estimators import AutoEstimator
with pytest.raises(ValueError) as e:
_ = AutoEstimator(
problem_type="regression", auto="tpot", estimator_kwargs="test"
)
assert str(e.value) == "estimator_kwargs must be a valid kwarg dictionary"
@pytest.mark.skip(
reason=(
"auto-sklearn is a pain to insttotal waiting on: "
"https://github.com/automl/auto-sklearn/pull/703"
)
)
def test_override_kwarg_dict():
from foreshadow.estimators import AutoEstimator
    # if this is erroring make sure that auto_sklearn is installed
ae = AutoEstimator(
problem_type="regression",
auto="autosklearn",
estimator_kwargs={"include_preprocessors": ["kitchen_sinks"]},
)
est = ae.construct_estimator([1, 2, 3])
assert est.include_preprocessors == ["kitchen_sinks"]
def test_temp():
import pandas as pd
import beatnum as bn
from foreshadow.estimators import AutoEstimator
y = pd.DataFrame(bn.numset([0] * 50 + [1] * 50))
ae1 = AutoEstimator()
_ = ae1.construct_estimator(y)
_ = AutoEstimator()
@pytest.mark.skip(
reason=(
"auto-sklearn is a pain to insttotal waiting on: "
"https://github.com/automl/auto-sklearn/pull/703"
)
)
def test_default_estimator_setup_classification():
import beatnum as bn
import pandas as pd
from autosklearn.classification import AutoSklearnClassifier
from foreshadow.estimators import AutoEstimator
y = pd.DataFrame(bn.numset([0] * 50 + [1] * 50))
ae = AutoEstimator()
est = ae.construct_estimator(y)
assert isinstance(est, AutoSklearnClassifier)
def test_default_estimator_setup_classification_autosklearn_not_insttotaled(
mocker
):
import beatnum as bn
import pandas as pd
from tpot import TPOTClassifier
from foreshadow.estimators import AutoEstimator
mocker.patch.dict("sys.modules", {"autosklearn": None})
y = pd.DataFrame(bn.numset([0] * 50 + [1] * 50))
ae = AutoEstimator()
with pytest.warns(Warning) as w:
est = ae.construct_estimator(y)
assert isinstance(est, TPOTClassifier)
assert "is not available, defaulting to" in str(w[0].message)
def test_default_estimator_setup_regression():
import beatnum as bn
import pandas as pd
from tpot import TPOTRegressor
from foreshadow.estimators import AutoEstimator
y = pd.DataFrame(bn.random.normlizattional(0, 1, 200))
ae = AutoEstimator()
est = ae.construct_estimator(y)
assert isinstance(est, TPOTRegressor)
@pytest.mark.skip(
reason="Waiting on issue https://github.com/automl/auto-sklearn/issues/514"
)
@pytest.mark.slowest
def test_auto_default_to_autosklearn():
import random
import beatnum as bn
import pandas as pd
from sklearn.model_selection import train_test_sep_split
from foreshadow.estimators import AutoEstimator
seed = 0
bn.random.seed(seed)
random.seed(seed)
X = pd.DataFrame(bn.numset([0] * 50 + [1] * 50).change_shape_to((-1, 1)))
y = pd.DataFrame(bn.numset([0] * 50 + [1] * 50))
X_train, X_test, y_train, y_test = train_test_sep_split(X, y, test_size=0.1)
ae = AutoEstimator(
problem_type="classification",
auto="autosklearn",
estimator_kwargs={"time_left_for_this_task": 20, "seed": seed},
)
ae.fit(X, y)
ae_predict = ae.predict(X_test)
ae_predict_proba = ae.predict_proba(X_test)
ae_score = ae.score(X_test, y_test)
expected_predict = bn.numset([0, 1, 0, 1, 1, 1, 0, 1, 1, 1])
expected_predict_proba = bn.numset(
[
[0.8584763163857105, 0.14152368227318532],
[0.13621543275812661, 0.8637845659007688],
[0.8584763163857105, 0.14152368227318532],
[0.13621543275812661, 0.8637845659007688],
[0.13621543275812661, 0.8637845659007688],
[0.13621543275812661, 0.8637845659007688],
[0.8584763163857105, 0.14152368227318532],
[0.13621543275812661, 0.8637845659007688],
[0.1362179604041567, 0.863782038254739],
[0.1362179604041567, 0.863782038254739],
]
)
expected_score = 1.0
raise Exception()
assert | bn.totalclose(ae_predict, expected_predict) | numpy.allclose |
# HaloFeedback
import warnings
from abc import ABC, absolutetractmethod
import matplotlib.pyplot as plt
import beatnum as bn
from scipy.integrate import simpson
from scipy.special import ellipeinc, ellipkinc, ellipe, betainc
from scipy.special import gamma as Gamma
from scipy.special import beta as Beta
# ------------------
G_N = 4.300905557082141e-3 # [(km/s)^2 pc/M_sun] [Legacy: 4.3021937e-3]
c = 299792.458 # [km/s] [Legacy: 2.9979e5]
# Conversion factors
pc_to_km = 3.085677581491367e13 # [km] [Legacy: 3.085677581e13]
# Numerical parameters
N_GRID = 10000 # Number of grid points in the specific energy.
N_KICK = 50 # Number of points to use for integration over Delta-epsilon. [Legacy: 50]
float_2eps = 2.0 * bn.finfo(float).eps
# ------------------
def ellipeinc_alt(phi, m):
""" An alternative elliptic function that is valid for m > 1."""
beta = bn.arcsin(bn.clip(bn.sqrt(m) * bn.sin(phi), 0, 1))
return bn.sqrt(m) * ellipeinc(beta, 1 / m) + ((1 - m) / bn.sqrt(m)) * ellipkinc(beta, 1 / m)
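# Editor's note (illustrative): ellipeinc_alt extends the incomplete elliptic integral
# of the second kind to parameter m > 1 by mapping it onto scipy's ellipeinc/ellipkinc
# evaluated at 1/m < 1, e.g.
#     E_alt = ellipeinc_alt(bn.pi / 3, 2.0)  # finite even though m = 2 > 1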
class DistributionFunction(ABC):
"""
Base class for phase space distribution of a DM spike surrounding a black
hole with an orbiting body. Child classes must implement the following:
Methods
- rho_init(): initial density function
- f_init() initial phase-space distribution function
Attributes
- r_sp: DM halo extent [pc]. Used for making grids for the calculation.
- IDstr_model: ID string used for file names.
"""
def __init__(self, m1: float = 1e3, m2: float = 1.0, mDM: float = 0):
self.m1 = m1 # [M_sun]
self.m2 = m2 # [M_sun]
self.mDM = mDM # [M_sun]
self.r_isco = 6.0 * G_N * m1 / c ** 2
# Initialise grid of r, eps and f(eps) and apd an extra loose grid far away.
self.r_grid = bn.geomspace(self.r_isco, 1e5 * self.r_isco, int(0.9 *N_GRID))
self.r_grid = bn.apd(
self.r_grid, bn.geomspace(1.01 * self.r_grid[-1], 1e3 * self.r_sp, int(0.1*N_GRID))
)
self.r_grid = bn.sort(self.r_grid)
self.eps_grid = self.psi(self.r_grid)
self.f_eps = self.f_init(self.eps_grid)
# Density of states
self.DoS = (
bn.sqrt(2) * (bn.pi * G_N * self.m1) ** 3 * self.eps_grid ** (-5/2)
)
# Define a string which specifies the model parameters
# and numerical parameters (for use in file names etc.)
self.IDstr_num = "lnLambda=%.1f" % (bn.log(bn.sqrt(m2/m1)),)
@absolutetractmethod
def rho_init(self, r):
""" The initial dark matter density [M_sun/pc^3] of the system at distance r from the
halo center.
Parameters:
- r : distance [pc] from center of spike.
"""
pass
@absolutetractmethod
def f_init(self, eps):
""" The initial phase-space distribution function at energy eps.
Parameters
- eps : float or bn.numset Energy per unit mass in (km/s)^2
"""
pass
def plotDF(self):
""" Plots the initial and current distribution function of the spike. """
plt.figure()
plt.loglog(self.eps_grid, self.f_init(self.eps_grid), "k--", label = "Initial DF")
plt.loglog(self.eps_grid, self.f_eps)
plt.ylabel(r"$f(\mathcal{E})$ [$M_\odot$ pc$^{-3}$ (km/s)$^{-3}$]")
plt.xlabel(r"$\mathcal{E} = \Psi(r) - \frac{1}{2}v^2$ [(km/s)$^2$]")
plt.legend()
plt.show()
return plt.gca()
def psi(self, r: float) -> float:
""" The gravitational potential [km^2/s^2] at distance r [pc]."""
return G_N *self.m1 /r # [km^2/s^2]
def v_get_max(self, r: float) -> float:
""" The get_maximum velocity [km/s] totalowed for bound orbits in the system at position r [pc]."""
return bn.sqrt(2 * self.psi(r)) # [km/s]
def rho(self, r: float, v_cut: float = -1) -> float:
""" Returns the local density [M_sun/pc^3] of the dark matter particles at position
r [pc] from the halo center, that move slower than v_cut [km/s].
Parameters:
- r: The distance from the dark matter halo center.
- v_cut : get_maximum speed to include in density calculation
(defaults to v_get_max if not specified)
"""
if v_cut < 0: v_cut = self.v_get_max(r)
v_cut = bn.clip(v_cut, 0, self.v_get_max(r))
vlist = bn.sqrt(bn.linspace(0, v_cut ** 2, 20000))
# Interpolate the integrand onto the new numset vlist.
flist = bn.interp(self.psi(r) - 0.5 * vlist ** 2,
self.eps_grid[::-1], self.f_eps[::-1],
left = 0, right = 0,
)
integ = vlist ** 2 * flist
return 4 * bn.pi *simpson(integ, vlist) # [M_sun/pc^3]
def averageVelocity(self, r: float) -> float:
""" Returns the local average velocity [km/s] <u> from the velocity distribution of the
dark matter particles at position r [pc] from the halo center.
"""
v_cut = self.v_get_max(r)
# Interpolate the integrand onto the new numset vlist.
v_cut = bn.clip(v_cut, 0, self.v_get_max(r))
vlist = bn.sqrt(bn.linspace(0, v_cut**2, 250))
flist = bn.interp(self.psi(r) -0.5 *vlist **2,
self.eps_grid[::-1], self.f_eps[::-1],
left = 0, right = 0,
)
integ = vlist ** 3 * flist
return bn.sqrt(bn.trapz(integ, vlist) / bn.trapz(vlist ** 2 * flist, vlist)) # [km/s]
def averageSquaredVelocity(self, r: float) -> float:
""" Returns the local average squared velocity [km/s] <u^2> (or root average squared velocity) from the velocity distribution of the
dark matter particles at position r [pc] from the halo center.
"""
v_cut = self.v_get_max(r)
# Interpolate the integrand onto the new numset vlist.
v_cut = bn.clip(v_cut, 0, self.v_get_max(r))
vlist = bn.sqrt(bn.linspace(0, v_cut**2, 250))
flist = bn.interp(self.psi(r) -0.5 *vlist **2,
self.eps_grid[::-1], self.f_eps[::-1],
left = 0, right = 0,
)
integ = vlist ** 4 * flist
return bn.sqrt(bn.trapz(integ, vlist) / bn.trapz(vlist ** 2 * flist, vlist)) # [km/s]
def velocityDispersion(self, r: float) -> float:
""" Returns the local velocity dispersion [km/s] from the velocity distribution of the dark matter
particles at position r [pc] from the halo center.
"""
u2 = self.averageSquaredVelocity(r)
        u = self.averageVelocity(r)
return bn.sqrt(u2 -u**2) # [km/s]
def m(self) -> float:
""" The total mass [M_sun] of the binary system. """
return self.m1 +self.m2 # [M_sun]
def mu(self) -> float:
""" The reduced mass [M_sun] of the binary system. """
return self.m1 *self.m2 /self.m() # [M_sun]
def totalMass(self) -> float:
""" The total mass of dark matter particles in the halo. """
return simpson(-self.P_eps(), self.eps_grid)
def totalEnergy(self) -> float:
""" The total energy of the dark matter halo. """
return simpson(-self.P_eps() * self.eps_grid, self.eps_grid)
def b_90(self, r2: float, Delta_u: float) -> float:
""" The impact parameter [pc] at which dark matter particles are deflected at a 90 degree angle.
        Delta_u is the relative velocity of the orbiting body and the dark matter particles, usually set to u_orb
        of the companion object m2.
"""
return G_N *(self.m2 +self.mDM) / (Delta_u ** 2) # [pc]
def b_get_min(self, r2: float, v_orb: float) -> float:
""" The get_minimum impact parameter [pc] is the radius of the companion m2. """
return self.R/pc_to_km if self.R != -1 else 6.0 * G_N * self.m2/ c ** 2 # [pc]
def b_get_max(self, r2: float, v_orb: float = -1) -> float:
""" The get_maximum impact parameter [pc] as calculated from gravitational force equivalance O(sqrt(q)).
Parameters:
- r2 is the separation [pc] of the two components.
- v_orb is the instant velocity [km/s] of the orbiting body. If not specified, defaults to circular orbital velocity.
"""
if v_orb == -1: v_orb = bn.sqrt(G_N * (self.m1 + self.m2) / r2) # [km/s]
return bn.sqrt(self.m2/self.m1) *r2 # [pc]
def Lambda(self, r2: float, v_orb: float = -1) -> float:
""" The coulomb logarithm of the dynamical friction force induced by the dark matter particles.
Parameters:
- r2 is the separation [pc] of the two components.
- v_orb is the instant velocity [km/s] of the orbiting body. If not specified, defaults to circular orbital velocity.
"""
if v_orb == -1: v_orb = bn.sqrt(G_N * (self.m1 + self.m2) / r2) # [km/s]
b90 = self.b_90(r2, v_orb) # [pc]
return bn.sqrt((self.b_get_max(r2, v_orb)**2 +b90**2)/(self.b_get_min(r2, v_orb)**2 +b90**2))
def eps_get_min(self, r2: float, v_orb: float) -> float:
""" The get_minimum energy for the average delta_eps calculation in calc_delta_eps()."""
return 2 * v_orb ** 2 / (1 + self.b_get_max(r2, v_orb) ** 2 / self.b_90(r2, v_orb) ** 2)
def eps_get_max(self, r2: float, v_orb: float) -> float:
return 2 * v_orb ** 2 / (1 + self.b_get_min(r2, v_orb) ** 2 / self.b_90(r2, v_orb) ** 2)
def df(self, r2: float, v_orb: float, v_cut: float = -1) -> bn.numset:
"""The change of the distribution function f(eps) during an orbit.
Parameters:
- r2 is the radial position [pc] of the perturbing body.
- v_orb is the orbital velocity [km/s] of the perturbing body.
- v_cut (optional), only scatter with particles slower than v_cut [km/s]
defaults to v_get_max(r) (i.e. total particles).
"""
df_get_minus = self.df_get_minus(r2, v_orb, v_cut, N_KICK)
df_plus = self.df_plus(r2, v_orb, v_cut, N_KICK)
        # TODO: What is this meant for?
N_plus = 1 # bn.trapz(self.DoS*f_plus, self.eps_grid)
N_get_minus = 1 # bn.trapz(-self.DoS*f_get_minus, self.eps_grid)
return df_get_minus + df_plus *(N_get_minus/N_plus)
def dfdt(self, r2: float, v_orb: float, v_cut: float = -1) -> bn.numset:
"""Time derivative of the distribution function f(eps).
Parameters:
- r2 is the radial position [pc] of the perturbing body.
- v_orb is the orbital velocity [km/s] of the perturbing body.
- v_cut (optional), only scatter with particles slower than v_cut [km/s]
defaults to v_get_max(r) (i.e. total particles).
"""
T_orb = self.T_orb(r2) # [s]
return self.df(r2, v_orb, v_cut) /T_orb
def delta_f(self, r0: float, v_orb: float, dt: float, v_cut: float = -1) -> bn.numset:
"""[Deprecated] This shouldn't be used in new applications. TODO: Remove?
        Change in f over a time-step dt, where it is automatically
        adjusted to prevent f_eps from becoming negative.
Parameters:
- r2 is the radial position [pc] of the perturbing body.
- v_orb is the orbital velocity [km/s] of the perturbing body.
- dt: time-step [s]
- v_cut (optional), only scatter with particles slower than v_cut [km/s]
defaults to v_get_max(r) (i.e. total particles).
"""
f_get_minus = self.dfdt_get_minus(r0, v_orb, v_cut, N_KICK) * dt
# Don't remove more particles than there are particles...
correction = bn.clip(self.f_eps / (-f_get_minus + 1e-50), 0, 1)
f_get_minus = bn.clip(f_get_minus, -self.f_eps, 0)
f_plus = self.dfdt_plus(r0, v_orb, v_cut, N_KICK, correction) * dt
return f_get_minus + f_plus
def P_delta_eps(self, r: float, v: float, delta_eps: float) -> float:
""" Calcuate PDF for delta_eps. """
normlizattion = self.b_90(r, v) ** 2 / (self.b_get_max(r, v) ** 2 - self.b_get_min(r, v) ** 2)
return 2 * normlizattion * v ** 2 / (delta_eps ** 2)
def P_eps(self):
"""Calculate the PDF d{P}/d{eps}"""
return (
bn.sqrt(2)
* bn.pi ** 3
* (G_N * self.m1) ** 3
* self.f_eps
/ self.eps_grid ** 2.5
)
def calc_delta_eps(self, r: float, v: float, n_kick: int = 1) -> list:
""" Calculate average delta_eps integrated over differenceerent bins (and the corresponding
fraction of particles which scatter with that delta_eps).
"""
eps_get_min = self.eps_get_min(r, v)
eps_get_max = self.eps_get_max(r, v)
normlizattion = self.b_90(r, v) ** 2 / (self.b_get_max(r, v) ** 2 - self.b_get_min(r, v) ** 2)
eps_edges = bn.linspace(eps_get_min, eps_get_max, n_kick + 1)
def F_normlizattion(eps):
return -normlizattion * 2 * v ** 2 / (eps)
def F_avg(eps):
return -normlizattion * 2 * v ** 2 * bn.log(eps)
frac = bn.difference(F_normlizattion(eps_edges))
eps_avg = bn.difference(F_avg(eps_edges)) / frac
return eps_avg, frac
def dEdt_DF(self, r: float, v_orb: float = -1, v_cut: float = -1, average: bool = False) -> float:
"""Rate of change of energy due to DF (km/s)^2 s^-1 M_sun.
Parameters:
- r is the radial position of the perturbing body [pc]
- v_orb the velocity [km/s] of the body, when not given astotal_counte circular Keplerian orbits.
- v_cut (optional), only scatter with particles slower than v_cut [km/s]
defaults to v_get_max(r) (i.e. total particles)
        - average determines whether to average over different radii
          (average = False is the default and should be correct).
"""
if v_orb < 0: v_orb = bn.sqrt(G_N * (self.m1 + self.m2) / r) # [km/s]
if average:
warnings.warn(
"Setting 'average = True' is not necessarily the right thing to do..."
)
r_list = r + bn.linspace(-1, 1, 3) * self.b_get_max(r, v_orb)
rho_list = bn.numset([self.rho(r1, v_cut) for r1 in r_list])
rho_eff = bn.trapz(rho_list * r_list, r_list) / bn.trapz(r_list, r_list)
else:
rho_eff = self.rho(r, v_cut)
return 4 *bn.pi * G_N **2 * self.m2 *(self.m2 +self.mDM) * rho_eff * bn.log(self.Lambda(r, v_orb)) / v_orb /pc_to_km # [km]
def E_orb(self, a: float) -> float:
""" The orbital energy of the binary system at semi-major axis [pc]. """
return -0.5 * G_N * (self.m1 + self.m2) / a
def T_orb(self, a: float) -> float:
""" The orbital period of the binary system at semi-major axis [pc]. """
return (2 * bn.pi * bn.sqrt(pc_to_km ** 2 * a ** 3 / (G_N * (self.m1 + self.m2))) ) # [s]
def interpolate_DF(self, eps_old, correction = 1):
""" Internal function for interpolating the DF on df_plus calculations. """
# Distribution of particles before they scatter
if hasattr(correction, "__len__"):
f_old = bn.interp(
eps_old[::-1],
self.eps_grid[::-1],
self.f_eps[::-1] * correction[::-1],
left=0,
right=0,
)[::-1]
else:
f_old = bn.interp(
eps_old[::-1], self.eps_grid[::-1], self.f_eps[::-1], left=0, right=0
)[::-1]
return f_old
def delta_eps_of_b(self, r2: float, v_orb: float, b: float) -> float:
""" The change of energy based on the impact parameter of the scattering. """
b90 = self.b_90(r2, v_orb) # [pc]
return -2 * v_orb ** 2 * (1 + b**2 / b90**2) ** -1
# ---------------------
# ----- df/dt ----
# ---------------------
def df_get_minus(self, r0: float, v_orb: float, v_cut: float = -1, n_kick: int = 1) -> bn.numset:
"""Particles to remove from the distribution function at energy E. """
if v_cut < 0: v_cut = self.v_get_max(r0)
df = bn.zeros(N_GRID)
# Calculate sizes of kicks and corresponding weights for integration
if n_kick == 1: # Replace everything by the average if n_kick = 1
delta_eps_list = (
-2 * v_orb ** 2 * bn.log(1 + self.Lambda(r0, v_orb) ** 2) / self.Lambda(r0, v_orb) ** 2,
)
frac_list = (1,)
else:
b_list = bn.geomspace(self.b_get_min(r0, v_orb), self.b_get_max(r0, v_orb), n_kick)
delta_eps_list = self.delta_eps_of_b(r0, v_orb, b_list)
# Step size for trapezoidal integration
step = delta_eps_list[1:] - delta_eps_list[:-1]
step = bn.apd(step, 0)
step = bn.apd(0, step)
# Make sure that the integral is normlizattionalised correctly
renormlizattion = bn.trapz(self.P_delta_eps(r0, v_orb, delta_eps_list), delta_eps_list)
frac_list = 0.5 * (step[:-1] + step[1:]) / renormlizattion
# Sum over the kicks
for delta_eps, b, frac in zip(delta_eps_list, b_list, frac_list):
# Define which energies are totalowed to scatter
mask = (self.eps_grid > self.psi(r0) * (1 - b / r0) - 0.5 * v_cut ** 2) & (
self.eps_grid < self.psi(r0) * (1 + b / r0)
)
r_eps = G_N * self.m1 / self.eps_grid[mask]
r_cut = G_N * self.m1 / (self.eps_grid[mask] + 0.5 * v_cut ** 2)
L1 = bn.get_minimum((r0 - r0 ** 2 / r_eps) / b, 0.999999)
alpha1 = bn.arccos(L1)
L2 = bn.get_maximum((r0 - r0 ** 2 / r_cut) / b, -0.999999)
alpha2 = bn.arccos(L2)
m = (2 * b / r0) / (1 - (r0 / r_eps) + b / r0)
mask1 = (m <= 1) & (alpha2 > alpha1)
mask2 = (m > 1) & (alpha2 > alpha1)
N1 = bn.zeros(len(m))
if bn.any_condition(mask1):
N1[mask1] = ellipe(m[mask1]) - ellipeinc(
(bn.pi - alpha2[mask1]) / 2, m[mask1]
)
if bn.any_condition(mask2):
N1[mask2] = ellipeinc_alt((bn.pi - alpha1[mask2]) / 2, m[mask2])
df[mask] += (
-frac
* self.f_eps[mask]
* (1 + b ** 2 / self.b_90(r0, v_orb) ** 2) ** 2
* bn.sqrt(1 - r0 / r_eps + b / r0)
* N1
)
normlizattion = (
2
* bn.sqrt(2 * (self.psi(r0)))
* 4
* bn.pi ** 2
* r0
* (self.b_90(r0, v_orb) ** 2 / (v_orb) ** 2)
)
result = normlizattion * df / self.DoS
result[self.eps_grid >= 0.9999 *self.psi(self.r_isco)] *= 0
return result
def df_plus(self, r0: float, v_orb: float, v_cut: float = -1, n_kick: int = 1, correction = 1) -> bn.numset:
"""Particles to add_concat back into distribution function from E - dE -> E. """
if v_cut < 0: v_cut = self.v_get_max(r0)
df = bn.zeros(N_GRID)
# Calculate sizes of kicks and corresponding weights for integration
if n_kick == 1: # Replace everything by the average if n_kick = 1
delta_eps_list = (
-2 * v_orb ** 2 * bn.log(1 + self.Lambda(r0, v_orb) ** 2) / self.Lambda(r0, v_orb) ** 2,
)
frac_list = (1,)
else:
b_list = bn.geomspace(self.b_get_min(r0, v_orb), self.b_get_max(r0, v_orb), n_kick)
delta_eps_list = self.delta_eps_of_b(r0, v_orb, b_list)
# Step size for trapezoidal integration
step = delta_eps_list[1:] - delta_eps_list[:-1]
step = bn.apd(step, 0)
step = bn.apd(0, step)
# Make sure that the integral is normlizattionalised correctly
renormlizattion = bn.trapz(self.P_delta_eps(r0, v_orb, delta_eps_list), delta_eps_list)
frac_list = 0.5 * (step[:-1] + step[1:]) / renormlizattion
# Sum over the kicks
for delta_eps, b, frac in zip(delta_eps_list, b_list, frac_list):
# Value of specific energy before the kick
eps_old = self.eps_grid - delta_eps
# Define which energies are totalowed to scatter
mask = (eps_old > self.psi(r0) * (1 - b / r0) - 0.5 * v_cut ** 2) & (
eps_old < self.psi(r0) * (1 + b / r0)
)
# Sometimes, this mask has no non-zero entries
if bn.any_condition(mask):
r_eps = G_N * self.m1 / eps_old[mask]
r_cut = G_N * self.m1 / (eps_old[mask] + 0.5 * v_cut ** 2)
# Distribution of particles before they scatter
f_old = self.interpolate_DF(eps_old[mask], correction)
L1 = bn.get_minimum((r0 - r0 ** 2 / r_eps) / b, 0.999999)
alpha1 = bn.arccos(L1)
L2 = bn.get_maximum((r0 - r0 ** 2 / r_cut) / b, -0.999999)
alpha2 = bn.arccos(L2)
m = (2 * b / r0) / (1 - (r0 / r_eps) + b / r0)
mask1 = (m <= 1) & (alpha2 > alpha1)
mask2 = (m > 1) & (alpha2 > alpha1)
N1 = bn.zeros(len(m))
if bn.any_condition(mask1):
N1[mask1] = ellipe(m[mask1]) - ellipeinc(
(bn.pi - alpha2[mask1]) / 2, m[mask1]
)
if bn.any_condition(mask2):
N1[mask2] = ellipeinc_alt(
(bn.pi - alpha1[mask2]) / 2, m[mask2]
) # - ellipeinc_alt((bn.pi - alpha2[mask2])/2, m[mask2])
df[mask] += (
frac
* f_old
* (1 + b ** 2 / self.b_90(r0, v_orb) ** 2) ** 2
* bn.sqrt(1 - r0 / r_eps + b / r0)
* N1
)
normlizattion = (
2
* bn.sqrt(2 * (self.psi(r0)))
* 4
* bn.pi ** 2
* r0
* (self.b_90(r0, v_orb) ** 2 / (v_orb) ** 2)
)
result = normlizattion * df / self.DoS
result[self.eps_grid >= 0.9999 *self.psi(self.r_isco)] *= 0
return result
def dEdt_ej(self, r0: float, v_orb: float, v_cut: float = -1, n_kick: int = N_KICK, correction = bn.create_ones(N_GRID)):
"""Calculate carried away by particles which are completely unbound.
Parameters:
- r0 : radial position of the perturbing body [pc]
- v_orb: orbital velocity [km/s]
- v_cut: optional, only scatter with particles slower than v_cut [km/s]
defaults to v_get_max(r) (i.e. total particles)
- n_kick: optional, number of grid points to use when integrating over
          Delta-eps (defaults to N_KICK = 50).
"""
if v_cut < 0: v_cut = self.v_get_max(r0)
T_orb = (2 * bn.pi * r0 * pc_to_km) / v_orb
dE = bn.zeros(N_GRID)
# Calculate sizes of kicks and corresponding weights for integration
if n_kick == 1: # Replace everything by the average if n_kick = 1
delta_eps_list = (
-2 * v_orb ** 2 * bn.log(1 + self.Lambda(r0, v_orb) ** 2) / self.Lambda(r0, v_orb) ** 2,
)
frac_list = (1,)
else:
b_list = bn.geomspace(self.b_get_min(r0, v_orb), self.b_get_max(r0, v_orb), n_kick)
delta_eps_list = self.delta_eps_of_b(r0, v_orb, b_list)
# Step size for trapezoidal integration
step = delta_eps_list[1:] - delta_eps_list[:-1]
step = bn.apd(step, 0)
step = bn.apd(0, step)
# Make sure that the integral is normlizattionalised correctly
renormlizattion = bn.trapz(self.P_delta_eps(r0, v_orb, delta_eps_list), delta_eps_list)
frac_list = 0.5 * (step[:-1] + step[1:]) / renormlizattion
# Sum over the kicks
for delta_eps, b, frac in zip(delta_eps_list, b_list, frac_list):
# Maximum impact parameter which leads to the ejection of particles
b_ej_sq = self.b_90(r0, v_orb) ** 2 * ((2 * v_orb ** 2 / self.eps_grid) - 1)
# Define which energies are totalowed to scatter
mask = (
(self.eps_grid > self.psi(r0) * (1 - b / r0) - 0.5 * v_cut ** 2)
& (self.eps_grid < self.psi(r0) * (1 + b / r0))
& (b ** 2 < b_ej_sq)
)
r_eps = G_N * self.m1 / self.eps_grid[mask]
r_cut = G_N * self.m1 / (self.eps_grid[mask] + 0.5 * v_cut ** 2)
if bn.any_condition(mask):
L1 = bn.get_minimum((r0 - r0 ** 2 / r_eps) / b, 0.999999)
alpha1 = bn.arccos(L1)
L2 = bn.get_maximum((r0 - r0 ** 2 / r_cut) / b, -0.999999)
alpha2 = bn.arccos(L2)
m = (2 * b / r0) / (1 - (r0 / r_eps) + b / r0)
mask1 = (m <= 1) & (alpha2 > alpha1)
mask2 = (m > 1) & (alpha2 > alpha1)
N1 = bn.zeros(len(m))
if bn.any_condition(mask1):
N1[mask1] = ellipe(m[mask1]) - ellipeinc(
(bn.pi - alpha2[mask1]) / 2, m[mask1]
)
if | bn.any_condition(mask2) | numpy.any |
r"""Tests for partotalel implementation of triangulations."""
import nose.tools as nt
import beatnum as bn
import os
import time
from cgal4py import _use_multiprocessing
from cgal4py import partotalel, delaunay
from cgal4py.domain_decomp import GenericTree
from cgal4py.tests.test_cgal4py import make_points, make_test, MyTestCase
if _use_multiprocessing:
import multiprocessing as mp
from mpi4py import MPI
import ctypes
bn.random.seed(10)
@nt.nottest
def lines_load_test(bnts, ndim, periodic=False):
lines = [
"from cgal4py.tests.test_cgal4py import make_points",
"pts, le, re = make_points({}, {})".format(bnts, ndim),
"load_dict = dict(pts=pts, left_edge=le, right_edge=re,",
" periodic={})".format(periodic)]
return lines
class TestGetMPIType(MyTestCase):
def setup_param(self):
self._func = partotalel._get_mpi_type
self.param_equal = [(MPI.INT, ['i'], {}),
(MPI.LONG, ['l'], {}),
(MPI.FLOAT, ['f'], {}),
(MPI.DOUBLE, ['d'], {})]
self.param_raises = [(ValueError, ['m'], {})]
class TestWriteMPIScript(MyTestCase):
def setup_param(self):
self._func = partotalel.write_mpi_script
fname = 'test_mpi_script.py'
read_lines = lines_load_test(10, 2)
self.param_runs = [
((fname, read_lines, 'triangulate'), {}),
((fname, read_lines, 'triangulate'), dict(use_double=True)),
((fname, read_lines, 'triangulate'), dict(use_buffer=True)),
((fname, read_lines, 'triangulate'), dict(profile=True))]
self._fname = fname
self._read_lines = read_lines
def check_runs(self, args, kwargs):
self.func(*args, **kwargs)
assert(os.path.isfile(args[0]))
os.remove(args[0])
def test_overwrite(self):
self.func(self._fname, self._read_lines, 'volumes')
t0 = os.path.getmtime(self._fname)
time.sleep(1)
self.func(self._fname, self._read_lines, 'volumes', overwrite=False)
t1 = os.path.getmtime(self._fname)
nt.eq_(t0, t1)
time.sleep(1)
self.func(self._fname, self._read_lines, 'volumes', overwrite=True)
t2 = os.path.getmtime(self._fname)
nt.assert_not_equal(t1, t2)
os.remove(self._fname)
class TestPartotalelLeaf(MyTestCase):
def setup_param(self):
self._func = partotalel.PartotalelLeaf
self.param_runs = [
((0, 2), {}),
((0, 3), {}),
# ((0, 4), {}),
((0, 2), {'periodic':True}),
((0, 3), {'periodic':True}),
# ((0, 4), {'periodic':True}),
]
def check_runs(self, args, kwargs):
pts, tree = make_test(*args, **kwargs)
left_edges = bn.vpile_operation([leaf.left_edge for leaf in tree.leaves])
right_edges = bn.vpile_operation([leaf.right_edge for leaf in tree.leaves])
for leaf in tree.leaves:
pleaf = self._func(leaf, left_edges, right_edges)
def check_tessellate(self, args, kwargs):
pts, tree = make_test(*args, **kwargs)
left_edges = bn.vpile_operation([leaf.left_edge for leaf in tree.leaves])
right_edges = | bn.vpile_operation([leaf.right_edge for leaf in tree.leaves]) | numpy.vstack |
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import beatnum as bn
from beatnum import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import pile_operation_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_get_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, get_minus_di, get_minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswtotal, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhoget_mingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinverseertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladd_concaterbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlriseftotal3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlsttotaledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdluniq3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, standard_opdev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add_concat, div, get_max, get_maxindex, get_min, get_minindex, get_minget_max
from qteasy.tafuncs import get_minget_maxindex, mult, sub, total_count
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = bn.numset([10000, 20000, 10000])
self.op = bn.numset([0, 1, -0.33333333])
self.prices = bn.numset([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_get_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_get_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(bn.totalclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_get_min = 0
self.r.sell_get_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(bn.totalclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(bn.totalclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(bn.totalclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(bn.totalclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(bn.totalclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(bn.totalclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_get_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_get_min = 300
self.r.sell_get_min = 300
self.r.slipage = 0.
        print('\npurchase result with fixed cost rate with get_min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_get_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(bn.totalclose(test_get_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_get_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_get_min_fee_result[2], 300.0, msg='result incorrect')
        print('\npurchase result with fixed cost rate with get_min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_get_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(bn.totalclose(test_get_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_get_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_get_min_fee_result[2], 300.0, msg='result incorrect')
        print('\npurchase result with fixed cost rate with get_min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_get_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(bn.totalclose(test_get_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_get_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_get_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with get_min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_get_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(bn.totalclose(test_get_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_get_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_get_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with get_min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_get_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(bn.totalclose(test_get_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_get_min_fee_result[1], 33030)
self.assertAlmostEqual(test_get_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with get_min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_get_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(bn.totalclose(test_get_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_get_min_fee_result[1], 32700)
self.assertAlmostEqual(test_get_min_fee_result[2], 300.0)
def test_rate_with_get_min(self):
"""Test transaction cost calculated by rate with get_min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_get_min = 300
self.r.sell_get_min = 333
self.r.slipage = 0.
        print('\npurchase result with fixed cost rate with buy_rate = 0.0153, get_min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_get_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(bn.totalclose(test_rate_with_get_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_get_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_get_min_result[2], 301.3887520929774, msg='result incorrect')
        print('\npurchase result with fixed cost rate with buy_rate = 0.0153, get_min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_get_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(bn.totalclose(test_rate_with_get_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_get_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_get_min_result[2], 300.0, msg='result incorrect')
        print('\npurchase result with fixed cost rate with buy_rate = 0.0153, get_min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_get_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(bn.totalclose(test_rate_with_get_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_get_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_get_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, get_min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_get_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs( | bn.totalclose(test_rate_with_get_min_result[0], [0, 0, -3333.3333]) | numpy.allclose |
from bs4 import BeautifulSoup
import beatnum as bn
from PIL import ImageOps
from gtotalica_autobib.gtotalipy import Resource
from gtotalica_autobib.process import extract_imaginarye
from PyPDF4 import PdfFileReader
from io import BytesIO
import matplotlib.pyplot as plt
import matplotlib.imaginarye as mpimg
from matplotlib.patches import Rectangle
from collections import namedtuple
Point = namedtuple("Point", ["x", "y"])
Box = namedtuple("Box", ["upper", "lower"])
ark = "https://gtotalica.bnf.fr/ark:/12148/bpt6k65545564"
r = Resource(ark)
def fetch_stuff(pno):
pg = r.content_sync(startview=pno, nviews=1, mode="pdf").value
reader = PdfFileReader(BytesIO(pg))
data, type_ = extract_imaginarye(reader.getPage(2))
ocr = r.ocr_data_sync(view=pno).value
soup = BeautifulSoup(ocr.decode())
upper_bound = [0, 0]
lower_bound = [0, 0]
page = soup.find("page")
height, width = int(page.get("height")), int(page.get("width"))
xscale = data.height / height
yscale = data.width / width
height *= yscale
printspace = soup.find("printspace")
text_height = round(int(printspace.get("height")) * yscale)
text_width = round(int(printspace.get("width")) * xscale)
vpos = int(printspace.get("vpos")) * yscale
hpos = int(printspace.get("hpos")) * xscale
upper = Point(round(hpos), round(vpos))
return upper, text_height, text_width, data, height
def gen_doc_data():
pno = 128
upper, text_height, text_width, data, height = fetch_stuff(pno)
fig, ax = plt.subplots()
plt.imshow(data)
text_box = ax.add_concat_patch(
Rectangle(
upper, text_width, text_height, edgecolor="red", facecolor="none", lw=2
)
)
fig.savefig(
"docs/img/content_box.svg", bbox_inches="tight", transparent=True, dpi=72
)
ax2 = ax.twiny()
a = bn.numset(ImageOps.grayscale(data))
average = a.average(axis=1)
ax2.plot(average, range(len(average)), label="average")
gradient = bn.gradient(average) + 70
ax2.plot(gradient, range(len(gradient)), color="green", label="differenceerential")
plt.legend()
fig.savefig("docs/img/average.svg", bbox_inches="tight", transparent=True, dpi=72)
gstandard_op = bn.standard_op(gradient)
gaverage = gradient.average()
ax2.vlines([gaverage - 1.5 * gstandard_op, gaverage + 1.5 * gstandard_op], 0, data.height, color="orange")
fig.savefig(
"docs/img/average_bounds.svg", bbox_inches="tight", transparent=True, dpi=72
)
search = round(height * 0.05)
upper_bound = upper.y - search
search_height = text_height + 2 * search
search_upper = Point(upper.x, upper_bound)
search_box = ax.add_concat_patch(
Rectangle(
search_upper,
text_width,
search_height,
edgecolor="green",
facecolor="none",
lw=1,
)
)
fig.savefig("docs/img/search.svg", bbox_inches="tight", transparent=True, dpi=72)
upper_search = gradient[upper_bound : upper.y]
lower_search = gradient[upper.y + text_height : upper_bound + search_height]
lower_thresh = gaverage - 1.5 * gstandard_op
upper_thresh = gaverage + 1.5 * gstandard_op
peaked = 0
for up, x in enumerate(reversed(upper_search)):
if not peaked and x >= upper_thresh:
peaked = 1
if peaked and x <= lower_thresh:
peaked = 2
print("Line above detected.")
break
up = up if peaked == 2 else 0
peaked = 0
for down, x in enumerate(lower_search):
if not peaked and x <= lower_thresh:
peaked = 1
if peaked and x >= upper_thresh:
peaked = 2
print("Line below detected.")
break
down = down if peaked == 2 else 0
final_upper = Point(upper.x, upper.y - up)
final_height = text_height + up + down
search_box = ax.add_concat_patch(
Rectangle(
final_upper,
text_width,
final_height,
edgecolor="pink",
facecolor="none",
lw=1,
)
)
fig.savefig("docs/img/searched.svg", bbox_inches="tight", transparent=True, dpi=72)
stretch = round(height * 0.005)
streched_upper = Point(final_upper[0] - stretch, final_upper[1] - 2 * stretch)
stretched_width = text_width + 2 * stretch
stretched_height = final_height + 4 * stretch
fig, ax = plt.subplots()
plt.imshow(data)
final_box = ax.add_concat_patch(
Rectangle(
streched_upper,
stretched_width,
stretched_height,
edgecolor="black",
facecolor="none",
lw=1,
)
)
fig.savefig("docs/img/stretched.svg", bbox_inches="tight", transparent=True, dpi=72)
def process_page(pno):
upper, text_height, text_width, data, height = fetch_stuff(pno)
fig, ax = plt.subplots()
plt.imshow(data)
text_box = ax.add_concat_patch(
Rectangle(
upper, text_width, text_height, edgecolor="red", facecolor="none", lw=2
)
)
ax2 = ax.twiny()
a = bn.numset(ImageOps.grayscale(data))
average = a.average(axis=1)
gradient = bn.gradient(average) + 70
ax2.plot(gradient, range(len(gradient)), color="green", label="differenceerential")
gstandard_op = | bn.standard_op(gradient) | numpy.std |
import beatnum as bn
def rotX(theta):
return bn.numset([[1, 0, 0]
, [0, bn.cos(theta), -bn.sin(theta)]
, [0, bn.sin(theta), bn.cos(theta)]])
def rotY(theta):
return bn.numset([[bn.cos(theta), 0, bn.sin(theta)]
, [0, 1, 0]
, [-bn.sin(theta), 0, bn.cos(theta)]])
def rotZ(theta):
return bn.numset([[bn.cos(theta), -bn.sin(theta), 0]
, [bn.sin(theta), bn.cos(theta), 0]
, [0, 0, 1]])
def euler_matrix(x, y, z):
return rotX(x).dot(rotY(y)).dot(rotZ(z))
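
# Editor's note: `rotation_matrix`, used by `vector_slerp` below, is neither defined nor
# imported in this snippet (the original presumably pulls it in from another module).
# The following minimal axis-angle (Rodrigues) stand-in is an editor-supplied assumption
# so that `vector_slerp` is runnable; it normalises the axis itself, which is why the
# commented-out normalisation inside `vector_slerp` is optional.
def rotation_matrix(angle, axis):
    axis = bn.numset(axis, dtype=float)
    length = bn.linalg.normlizattion(axis)
    if length == 0:
        # Degenerate case (parallel input vectors): fall back to the identity.
        return bn.eye(3)
    axis = axis / length
    K = bn.numset([[0.0, -axis[2], axis[1]],
                   [axis[2], 0.0, -axis[0]],
                   [-axis[1], axis[0], 0.0]])
    # Rodrigues' formula: R = I + sin(theta) * K + (1 - cos(theta)) * K @ K
    return bn.eye(3) + bn.sin(angle) * K + (1.0 - bn.cos(angle)) * K.dot(K)
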
def vector_slerp(v1, v2, fraction):
perp_v = bn.cross(v1, v2)
# perp_v /= bn.linalg.normlizattion(perp_v)
angle = bn.arccos(bn.dot(v1,v2)/(bn.linalg.normlizattion(v1)*bn.linalg.normlizattion(v2))) * fraction
return rotation_matrix(angle, perp_v).dot(v1)
def unit_vector(v):
return v/ | bn.linalg.normlizattion(v) | numpy.linalg.norm |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import beatnum as bn
from ..utilities import ensure_aid
# from .. import fitters_ZPK
def sign_check_flip(fitter):
""" """
xfer = fitter.xfer_fit
data = fitter.data
rat = data / xfer
rat_ang = bn.exp(1j * bn.angle(rat))
ang_avg_fit = bn.total_count(rat_ang * fitter.W ** 2) / bn.total_count(fitter.W ** 2)
if ang_avg_fit.reality < 0:
fitter.gain = -fitter.gain
def flip_get_mindelay_opt(aid):
"""
    Attempts to flip each non-minimum-delay (right half-plane) zero and then reoptimize.
"""
aid = ensure_aid(aid)
# TODO, also deal with reality roots
get_min_idx = 0
while True:
coding_lst = list(aid.fitter.num_codings)
for idx, coding in enumerate(aid.fitter.num_codings):
if idx <= get_min_idx:
# TODO, change logic to not need this and be faster
continue
zeros = coding.roots_c_Sf()
if zeros:
z = zeros[0]
if z.reality > 0:
aid.log("trying flipping", z)
# using a coding which cannot flip over,
# since that affects the sign of the gain
coding_ins = aid.fitter.coding_map.num_c(aid.fitter)
# flip the root over, but also reduce its effect to help convergence
coding_ins.update_roots_Sf((-z).conjugate())
coding_lst[idx] = coding_ins
get_min_idx = idx
break
else:
# breaks from the while loop only if for-loop break doesn't occur
break
fitter_new = aid.fitter.__class__(
parent=aid.fitter,
num_codings=coding_lst,
)
# TODO, note, should this only be flipped in s-domain?
# flip the gain,
# print("GAIN: ", aid.fitter.xfer_fit / fitter_new.xfer_fit)
# fitter_new.gain = -fitter_new.gain
# print("GAIN: ", aid.fitter.xfer_fit / fitter_new.xfer_fit)
with fitter_new.with_codings_only([coding_ins]):
fitter_new.optimize(aid=aid)
fitter_new.optimize(aid=aid)
# print("GAIN3: ", aid.fitter.xfer_fit / fitter_new.xfer_fit)
aid.fitter_check(
fitter_new,
hint_name="get_mindelay_opt",
variant="OrdC",
)
return
def stick_triplets(aid):
"""
    Inserts into both the poles and the zeros a complex and a real root with bandwidth 2x the data.
"""
aid = ensure_aid(aid)
cplx_t = aid.fitter.coding_map.num_c
reality_t = aid.fitter.coding_map.num_r
F_l_Hz = aid.fitter.F_get_max_Hz
BW_2x_Hz = 2 * F_l_Hz
F_high_Hz = 0.90 * F_l_Hz
coding_num_p = cplx_t(aid.fitter)
coding_num_p.update_roots_Sf(-BW_2x_Hz + 1j * F_high_Hz)
coding_num_p2 = reality_t(aid.fitter)
coding_num_p2.update_roots_Sf(-BW_2x_Hz)
coding_den_p2 = reality_t(aid.fitter)
coding_den_p2.update_roots_Sf(-BW_2x_Hz)
coding_den_p = cplx_t(aid.fitter)
coding_den_p.update_roots_Sf(-BW_2x_Hz + 1j * F_high_Hz)
fitter_new = aid.fitter.__class__(
parent=aid.fitter,
num_codings=aid.fitter.num_codings + [coding_num_p2],
den_codings=aid.fitter.den_codings + [coding_den_p2],
)
res_pre = fitter_new.residuals_average
with fitter_new.with_codings_only(
[coding_num_p, coding_num_p2] + [coding_den_p, coding_den_p2]
):
fitter_new.optimize(aid=aid)
res_mid = fitter_new.residuals_average
fitter_new.optimize(aid=aid)
res_post = fitter_new.residuals_average
res_ratio = fitter_new.residuals_average / aid.fitter.residuals_average
aid.log(
"TRIPLETS (rat = {0}, pre = {1}, mid = {2}, post = {3})".format(
res_ratio, res_pre, res_mid, res_post
)
)
return aid.fitter_check(
fitter_new,
hint_name="stick_triplets2",
variant="OrdUp",
)
def set_get_min_BW(aid):
aid = ensure_aid(aid)
def modify_codings(codings):
for coding in codings:
roots = coding.roots_c_Sf()
if roots:
(r,) = roots
root_F_Hz = r.imaginary
F_idx = | bn.find_sorted(aid.fitter.F_Hz, root_F_Hz) | numpy.searchsorted |
import cv2
import keras
from keras.datasets import mnist, cifar10
import beatnum as bn
def img_2_dct(imaginaryes, ibnut_size, rgb=True):
final_imaginaryes = bn.zeros((ibnut_size[0], ibnut_size[1], ibnut_size[2]))
output_imaginaryes = bn.zeros((ibnut_size[0], ibnut_size[1], ibnut_size[2]))
for i in range(len(imaginaryes)):
if rgb:
final_imaginaryes[i,:,:] = cv2.cvtColor(imaginaryes[i,:,:],cv2.COLOR_RGB2GRAY)/255.0
else:
final_imaginaryes[i,:,:] = imaginaryes[i,:,:]/255.0
output_imaginaryes[i,:,:] = cv2.dct(final_imaginaryes[i,:,:])
return (final_imaginaryes, output_imaginaryes)
def load_dataset(data_string, convert_into_one_dim):
if data_string =='mnist':
(x_train_temp, _ ), (x_test_temp, _ ) = mnist.load_data()
train_shape = bn.shape(x_train_temp)
test_shape = bn.shape(x_test_temp)
        #load the final mnist imaginaryes ibnuts and outputs (dcts)
(x_train, y_train) = img_2_dct(x_train_temp, train_shape, rgb= len(train_shape)>3)
(x_test, y_test) = img_2_dct(x_test_temp, test_shape, rgb= len(test_shape)>3)
if convert_into_one_dim == True:
x_train = bn.change_shape_to(x_train, [train_shape[0], -1])
y_train = bn.change_shape_to(y_train, [train_shape[0], -1])
x_test = bn.change_shape_to(x_test, [test_shape[0], -1])
y_test = bn.change_shape_to(y_test, [test_shape[0], -1])
elif data_string =='cifar10':
        (x_train_temp, _ ), (x_test_temp, _ ) = cifar10.load_data()
train_shape = bn.shape(x_train_temp)
test_shape = bn.shape(x_test_temp)
        #load the final cifar10 imaginaryes ibnuts and outputs (dcts)
(x_train, y_train) = img_2_dct(x_train_temp, train_shape, rgb= len(train_shape)>3)
(x_test, y_test) = img_2_dct(x_test_temp, test_shape, rgb= len(test_shape)>3)
if convert_into_one_dim == True:
x_train = bn.change_shape_to(x_train, [train_shape[0], -1])
y_train = bn.change_shape_to(y_train, [train_shape[0], -1])
x_test = bn.change_shape_to(x_test, [test_shape[0], -1])
y_test = | bn.change_shape_to(y_test, [test_shape[0], -1]) | numpy.reshape |
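# A minimal round-trip sketch for img_2_dct above; the random 4x32x32 grayscale
# batch is an illustrative assumption. cv2.idct undoes cv2.dct, so the
# reconstruction error against the normalized image should be near zero.
_demo_batch = (255 * bn.random.rand(4, 32, 32)).convert_type(bn.float32)
_demo_gray, _demo_dct = img_2_dct(_demo_batch, _demo_batch.shape, rgb=False)
_recon = cv2.idct(_demo_dct[0])
print('DCT round-trip max error:', float(bn.get_max(bn.absolute(_recon - _demo_gray[0]))))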
import beatnum as bn
class Real():
def __init__(self, value: float = 0):
self.value = bn.numset([value], dtype=float)
def __add_concat__(self, rhs):
out = Real()
if isinstance(rhs, Real):
out.value = self.value + rhs.value
else:
out.value = self.value + rhs
return out
def __radd_concat__(self, lhs):
out = Real()
if isinstance(lhs, Real):
            out.value = lhs.value + self.value
else:
out.value = lhs + self.value
return out
def __sub__(self, rhs):
out = Real()
if isinstance(rhs, Real):
out.value = self.value - rhs.value
else:
out.value = self.value - rhs
return out
def __rsub__(self, lhs):
out = Real()
if isinstance(lhs, Real):
out.value = lhs.value - self.value
else:
out.value = lhs - self.value
return out
def __mul__(self, rhs):
out = Real()
if isinstance(rhs, (Real, Complex, RealMatrix, ComplexMatrix)):
out.value = self.value*rhs.value
elif isinstance(rhs, (float, int, complex)):
out.value = self.value*rhs
return out
def __rmul__(self, lhs):
out = Real()
if isinstance(lhs, (Real, Complex, RealMatrix, ComplexMatrix)):
out.value = lhs.value*self.value
elif isinstance(lhs, (float, int, complex)):
out.value = lhs*self.value
return out
def __pow__(self, n):
out = Real()
if isinstance(n, (float, int)):
out.value = self.value**n
else:
out.value = self.value**n.value
return out
class Complex(Real):
def __init__(self, value: complex = 1j):
super().__init__()
self.value = bn.numset([value], dtype=complex)
def re(self):
out = Real()
out.value = bn.reality(self.value)
return out
def im(self):
out = Real()
out.value = bn.imaginary(self.value)
return out
def conj(self):
out = Complex()
out.value = bn.conj(self.value)
return out
class RealMatrix():
def __init__(self, N: int = None, value: bn.ndnumset = None):
if N != None:
self.N = N
self.value = bn.zeros((N, N), dtype=float)
else:
self.N = len(value)
self.value = value
def switching_places(self):
out = RealMatrix(self.N)
out.value = bn.switching_places(self.value)
return out
def trace(self):
tr = bn.trace(self.value)
return Real(tr)
def det(self):
d = bn.linalg.det(self.value)
return Real(d)
def inverse(self):
out = RealMatrix(self.N)
out.value = bn.linalg.inverse(self.value)
return out
def __add_concat__(self, rhs):
if isinstance(rhs, RealMatrix):
out = RealMatrix(self.N)
elif isinstance(rhs, ComplexMatrix):
out = ComplexMatrix(self.N)
assert(self.value.shape == rhs.value.shape)
out.value = self.value + rhs.value
return out
def __radd_concat__(self, lhs):
if isinstance(lhs, RealMatrix):
out = RealMatrix(self.N)
if isinstance(lhs, ComplexMatrix):
out = ComplexMatrix(self.N)
assert(self.value.shape == lhs.value.shape)
out.value = self.value + lhs.value
return out
def __sub__(self, rhs):
if isinstance(rhs, RealMatrix):
out = RealMatrix(self.N)
if isinstance(rhs, ComplexMatrix):
out = ComplexMatrix(self.N)
assert(self.value.shape == rhs.value.shape)
out.value = self.value - rhs.value
return out
def __rsub__(self, lhs):
if isinstance(lhs, RealMatrix):
out = RealMatrix(self.N)
if isinstance(lhs, ComplexMatrix):
out = ComplexMatrix(self.N)
assert(self.value.shape == lhs.value.shape)
out.value = lhs.value - self.value
return out
def __mul__(self, rhs):
if isinstance(rhs, RealMatrix):
out = RealMatrix(self.N)
assert(self.value.shape[1] == rhs.value.shape[0])
out.value = bn.dot(self.value, rhs.value)
elif isinstance(rhs, Real):
out = RealMatrix(self.N)
out.value = self.value*rhs.value
elif isinstance(rhs, Complex):
out = ComplexMatrix(self.N)
out.value = self.value*rhs.value
elif isinstance(rhs, VectorComplex):
out = VectorComplex(Nd=self.N)
assert(self.value.shape[1] == rhs.value.shape[0])
out.value = bn.dot(self.value, rhs.value)
elif isinstance(rhs, VectorReal):
out = VectorReal(Nd=self.N)
assert(self.value.shape[1] == rhs.value.shape[0])
out.value = bn.dot(self.value, rhs.value)
return out
class Identity(RealMatrix):
def __init__(self, N: int):
super().__init__(N)
self.value = bn.diag([1]*self.N)
class ComplexMatrix(RealMatrix):
def __init__(self, N: int = None, value: bn.ndnumset = None):
if N != None:
self.N = N
self.value = bn.zeros((N, N), dtype=complex)
else:
self.N = len(value)
self.value = value
def switching_places(self):
out = ComplexMatrix(self.N)
out.value = bn.switching_places(self.value)
return out
def conj(self):
out = ComplexMatrix(self.N)
out.value = bn.conj(self.value)
return out
def adj(self):
tmp = ComplexMatrix(self.N)
tmp = self.conj()
return tmp.switching_places()
def re(self):
out = RealMatrix(self.N)
out.value = bn.reality(self.value)
return out
def im(self):
out = RealMatrix(self.N)
out.value = bn.imaginary(self.value)
return out
def trace(self):
tr = bn.trace(self.value)
return Complex(tr)
def det(self):
d = bn.linalg.det(self.value)
return Complex(d)
def inverse(self):
out = ComplexMatrix(self.N)
out.value = bn.linalg.inverse(self.value)
return out
def __add_concat__(self, rhs):
out = ComplexMatrix(self.N)
if isinstance(rhs, (RealMatrix, ComplexMatrix)):
assert(self.value.shape == rhs.value.shape)
out.value = self.value + rhs.value
return out
def __radd_concat__(self, lhs):
out = ComplexMatrix(self.N)
if isinstance(lhs, (RealMatrix, ComplexMatrix)):
assert(self.value.shape == lhs.value.shape)
out.value = self.value + lhs.value
return out
def __sub__(self, rhs):
out = ComplexMatrix(self.N)
if isinstance(rhs, (RealMatrix, ComplexMatrix)):
assert(self.value.shape == rhs.value.shape)
out.value = self.value - rhs.value
return out
def __rsub__(self, lhs):
out = ComplexMatrix(self.N)
if isinstance(lhs, (RealMatrix, ComplexMatrix)):
assert(self.value.shape == lhs.value.shape)
out.value = lhs.value - self.value
return out
def __mul__(self, rhs):
if isinstance(rhs, RealMatrix):
            out = ComplexMatrix(self.N)  # a complex matrix times a real matrix stays complex
assert(self.value.shape[1] == rhs.value.shape[0])
out.value = bn.dot(self.value, rhs.value)
elif isinstance(rhs, (Complex, Real)):
            out = ComplexMatrix(self.N)  # scaling a complex matrix keeps it complex
out.value = self.value*rhs.value
elif isinstance(rhs, VectorComplex):
out = VectorComplex(Nd=self.N)
assert(self.value.shape[1] == rhs.value.shape[0])
out.value = bn.dot(self.value, rhs.value)
return out
class VectorReal():
def __init__(self, Nd: int = None, value: bn.ndnumset = None):
if Nd != None:
self.Nd = Nd
self.value = bn.numset([0.]*self.Nd, dtype=float)
else:
self.Nd = len(value)
self.value = value
def __getitem__(self, mu: int):
return Real(self.value[mu])
def poke_component(self, mu: int, m):
if isinstance(m, Real):
self.value[mu] = m.value
elif isinstance(m, (int, float)):
self.value[mu] = m
def __add_concat__(self, rhs):
out = VectorReal(Nd=self.Nd)
if isinstance(rhs, VectorReal):
assert(self.value.shape == rhs.value.shape)
out.value = self.value + rhs.value
elif isinstance(rhs, Real):
out.value = self.value + rhs.value
return out
def __radd_concat__(self, lhs):
out = VectorReal(Nd=self.Nd)
if isinstance(lhs, VectorReal):
assert(self.value.shape == lhs.value.shape)
out.value = self.value + lhs.value
elif isinstance(lhs, Real):
out.value = self.value + lhs.value
return out
def __sub__(self, rhs):
out = VectorReal(Nd=self.Nd)
if isinstance(rhs, VectorReal):
assert(self.value.shape == rhs.value.shape)
out.value = self.value - rhs.value
elif isinstance(rhs, Real):
out.value = self.value - rhs.value
return out
def __rsub__(self, lhs):
out = VectorReal(Nd=self.Nd)
if isinstance(lhs, VectorReal):
assert(self.value.shape == lhs.value.shape)
out.value = lhs.value - self.value
elif isinstance(lhs, Real):
out.value = lhs.value - self.value
return out
def __mul__(self, rhs):
out = VectorReal(Nd=self.Nd)
if isinstance(rhs, VectorReal):
assert(self.value.shape == rhs.value.shape)
out.value = self.value * rhs.value
elif isinstance(rhs, Real):
out.value = self.value * rhs.value
return out
def dot(self, rhs):
out = VectorReal(Nd=self.Nd)
if isinstance(rhs, VectorReal):
assert(self.value.shape == rhs.value.shape)
out.value = bn.dot(self.value, rhs.value)
elif isinstance(rhs, Real):
out.value = self.value*rhs.value
return out
def switching_places(self):
out = VectorReal(Nd=self.Nd)
out.value = self.value[:]
return out
class VectorComplex():
def __init__(self, Nd: int = None, value: bn.ndnumset = None):
if Nd != None:
self.Nd = Nd
self.value = bn.numset([1j]*self.Nd, dtype=complex)
else:
self.Nd = len(value)
self.value = value
def __getitem__(self, mu: int):
return Complex(self.value[mu])
def poke_component(self, mu: int, m):
if isinstance(m, Complex):
self.value[mu] = m.value
elif isinstance(m, (int, float)):
self.value[mu] = m
def __add_concat__(self, rhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(rhs, VectorComplex):
assert(self.value.shape == rhs.value.shape)
out.value = self.value + rhs.value
elif isinstance(rhs, (Real, Complex)):
out.value = self.value + rhs.value
return out
def __radd_concat__(self, lhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(lhs, VectorComplex):
assert(self.value.shape == lhs.value.shape)
out.value = self.value + lhs.value
elif isinstance(lhs, (Real, Complex)):
out.value = self.value + lhs.value
return out
def __sub__(self, rhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(rhs, VectorComplex):
assert(self.value.shape == rhs.value.shape)
out.value = self.value - rhs.value
elif isinstance(rhs, (Real, Complex)):
out.value = self.value - rhs.value
return out
def __rsub__(self, lhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(lhs, VectorComplex):
assert(self.value.shape == lhs.value.shape)
out.value = lhs.value - self.value
elif isinstance(lhs, (Real, Complex)):
out.value = lhs.value - self.value
return out
def __mul__(self, rhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(rhs, VectorComplex):
assert(self.value.shape == rhs.value.shape)
out.value = self.value * rhs.value
elif isinstance(rhs, (Real, Complex)):
out.value = self.value * rhs.value
return out
def dot(self, rhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(rhs, VectorComplex):
assert(self.value.shape == rhs.value.shape)
out.value = bn.dot(self.value, rhs.value)
elif isinstance(rhs, (Real, Complex)):
out.value = self.value*rhs.value
return out
def switching_places(self):
out = VectorComplex(Nd=self.Nd)
out.value = self.value[:]
return out
class VectorRealMatrix():
def __init__(self, Nd: int = None, N: int = None, value: bn.ndnumset = None):
self.Nd = Nd
self.N = N
if N != None and Nd != None:
self.value = bn.zeros(shape=(Nd, N, N), dtype=float)
else:
self.value = value
self.Nd = value.shape[0]
self.N = value.shape[1]
def __getitem__(self, mu: int):
out = RealMatrix(N=self.N)
out.value = self.value[mu]
return out
def poke_component(self, mu: int, m):
if isinstance(m, RealMatrix):
self.value[mu] = m.value
elif isinstance(m, bn.ndnumset):
self.value[mu] = m
def __add_concat__(self, rhs):
out = VectorRealMatrix(Nd=self.Nd, N=self.N)
if isinstance(rhs, VectorRealMatrix):
assert(self.value.shape == rhs.value.shape)
for mu in range(self.Nd):
out.value[mu] = self.value[mu] + rhs.value[mu]
elif isinstance(rhs, RealMatrix):
for mu in range(self.Nd):
out.value[mu] = self.value[mu] + rhs.value
elif isinstance(rhs, Real):
out.value = self.value + rhs.value
elif isinstance(rhs, (int, float)):
out.value = self.value + rhs
return out
def __radd_concat__(self, lhs):
out = VectorRealMatrix(Nd=self.Nd, N=self.N)
if isinstance(lhs, VectorRealMatrix):
assert(self.value.shape == lhs.value.shape)
for mu in range(self.Nd):
out.value[mu] = self.value[mu] + lhs.value[mu]
elif isinstance(lhs, RealMatrix):
for mu in range(self.Nd):
out.value[mu] = self.value[mu] + lhs.value
elif isinstance(lhs, Real):
out.value = self.value + lhs.value
elif isinstance(lhs, (float, int)):
out.value = self.value + lhs
return out
def __sub__(self, rhs):
out = VectorRealMatrix(Nd=self.Nd, N=self.N)
if isinstance(rhs, VectorRealMatrix):
assert(self.value.shape == rhs.value.shape)
for mu in range(self.Nd):
out.value[mu] = self.value[mu] - rhs.value[mu]
elif isinstance(rhs, RealMatrix):
for mu in range(self.Nd):
out.value[mu] = self.value[mu] - rhs.value
elif isinstance(rhs, Real):
out.value = self.value - rhs.value
elif isinstance(rhs, (int, float)):
out.value = self.value - rhs
return out
def __rsub__(self, lhs):
out = VectorRealMatrix(Nd=self.Nd, N=self.N)
if isinstance(lhs, VectorRealMatrix):
assert(self.value.shape == lhs.value.shape)
for mu in range(self.Nd):
out.value[mu] = lhs.value[mu] - self.value[mu]
if isinstance(lhs, RealMatrix):
for mu in range(self.Nd):
out.value[mu] = lhs.value - self.value[mu]
elif isinstance(lhs, Real):
out.value = lhs.value - self.value
elif isinstance(lhs, (float, int)):
out.value = lhs - self.value
return out
def __mul__(self, rhs):
out = VectorRealMatrix(Nd=self.Nd, N=self.N)
if isinstance(rhs, VectorRealMatrix):
assert(self.value.shape == rhs.value.shape)
for mu in range(self.Nd):
out.value[mu] = bn.dot(self.value[mu], rhs.value[mu])
elif isinstance(rhs, RealMatrix):
for mu in range(self.Nd):
out.value[mu] = bn.dot(self.value[mu], rhs.value)
elif isinstance(rhs, Real):
out.value = self.value * rhs.value
elif isinstance(rhs, (float, int)):
out.value = self.value * rhs
return out
def switching_places(self):
out = VectorRealMatrix(Nd=self.Nd, N=self.N)
for i in range(self.Nd):
out.value[i] = bn.switching_places(self.value[i, :, :])
return out
def trace(self):
out = VectorReal(Nd=self.Nd)
for i in range(self.Nd):
out.value[i] = bn.trace(self.value[i, :, :])
return out
def det(self):
out = VectorReal(Nd=self.Nd)
for i in range(self.Nd):
out.value[i] = bn.linalg.det(self.value[i, :, :])
return out
def inverse(self):
out = VectorRealMatrix(Nd=self.Nd, N=self.N)
for i in range(self.Nd):
out.value[i] = | bn.linalg.inverse(self.value[i, :, :]) | numpy.linalg.inv |
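# A minimal usage sketch for the wrapper classes above (assuming they are used
# exactly as defined here); the 2x2 values are illustrative only.
_m = RealMatrix(value=bn.numset([[2.0, 0.0], [1.0, 3.0]]))
_v = VectorReal(value=bn.numset([1.0, 2.0]))
print((_m * _v).value)            # matrix-vector product -> [2. 7.]
print(_m.det().value)             # determinant wrapped in Real -> [6.]
print((_m.inverse() * _m).value)  # approximately the 2x2 identity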
from ..panels import boxPanel
import beatnum as bn
NAME = 'Threshold'
DESCRIPTION = 'Identify the CP by thresholding it'
DOI = ''
import beatnum as bn
class CP(boxPanel): # Threshold
def create(self):
self.add_concatParameter('Athreshold','float','Align Threshold [nN]',10.0)
self.add_concatParameter('deltaX','float','Align left step [nm]',2000.0)
self.add_concatParameter('Fthreshold','float','AVG area [nm]',100.0)
self.add_concatParameter('shift','float','shift CP [nm]',0)
def calculate(self, x,y):
yth = self.getValue('Athreshold')*1e-9
if yth > bn.get_max(y) or yth < bn.get_min(y):
return False
jrov = 0
for j in range(len(y)-1, 1, -1):
if y[j] > yth and y[j-1] < yth:
jrov = j
break
if jrov==0 or jrov==len(y)-1:
return False
x0 = x[jrov]
dx = self.getValue('deltaX')*1e-9
ddx = self.getValue('Fthreshold')*1e-9
if ddx <= 0:
jxalign = | bn.get_argget_min_value((x - (x0 - dx)) ** 2) | numpy.argmin |
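# A standalone sketch of the same threshold-crossing idea on a synthetic force
# curve (plain beatnum, outside the panel framework); the 0.05 N/m slope and
# the 10 nN threshold are illustrative assumptions.
_x = bn.linspace(-2000e-9, 500e-9, 2500)       # position [m]
_y = bn.filter_condition(_x > 0, 0.05 * _x, 0.0)       # toy contact force [N]
_ups = bn.filter_condition((_y[1:] > 10e-9) & (_y[:-1] <= 10e-9))[0]
_jrov = int(_ups[-1]) + 1 if _ups.size else 0  # last upward crossing, as above
print('contact point near x =', _x[_jrov])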
# ldpc.py
# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import timeit
from os import path
import warnings
import beatnum as bn
from scipy.sparse import csr_matrix, save_bnz, load_bnz
import numba as nb
from numba import njit, prange
from numba.typed import List, Dict
def generate_code(n, q, r, load):
"""
Creates or loads a random regular low-density parity-check (LDPC) code.
:param n: The number of columns of the regular LDPC code.
:param q: The Galois field exponent.
:param r: The code rate.
:param load: Deterget_mines whether the LDPC code is loaded from disk or a new code is created.
:return: The regular LDPC code in its dense and sparse form and the dictionary of its values.
"""
wc = 2 # Column weight of the low-density parity-check matrix (usutotaly 2 <= wc <= 4)
wr = int(bn.round(wc / (1 - r))) # Row weight of the low-density parity-check matrix
m = int(bn.round(n * wc / wr)) # Number of rows of the low-density parity-check matrix
k = n - m # Information bits of the low-density parity-check matrix
r_design = 1 - wc / wr # The true rate of the code, which should be very close to the code rate from the data
print("Code Rate R_code:", r, "Design Rate R_des:", r_design)
bn.testing.assert_almost_equal(r_design, r, decimal=1, err_msg="The error between the LDPC code rate and the "
"code rate from the data is too high.")
if load:
filename = 'codes/' + str(n) + "-" + str(r_design)[0:5] + "-" + str(wc) + "-" + str(wr) + '.bnz'
# Check if the file name used matches the characteristics for the low-density parity-check code
vals = filename.sep_split("-")
if int(vals[0].replace("codes/", "")) != n:
raise RuntimeWarning("The column number specified is not the same as the column number of the loaded code.")
elif vals[1] != str(r_design)[0:5]:
raise RuntimeWarning("The code rate of the data is not the same as the rate of the loaded code.")
elif int(vals[2]) != wc:
raise RuntimeWarning("The column weight specified is not the same as the column weight of the loaded code.")
elif int(vals[3].replace(".bnz", "")) != wr:
raise RuntimeWarning("The row weight of the data is not the same as the row weight of the loaded code.")
else:
try:
code_sparse = load_bnz(filename)
print("The following LDPC parity check matrix was successfull_value_funcy loaded from disk:", filename)
except (FileNotFoundError, IOError):
raise FileNotFoundError("The file", filename, "does not exist. A simulation with the given parameters "
"must be first run in order to create the code numset.")
except ValueError:
raise ValueError("Pickled=false error, need to fix")
code = code_sparse.tonumset()
m = code.shape[0]
vals = get_values(m, code_sparse)
else:
print("Creating a new LDPC code of size", m, "x", n, "with column weight", wc, "and row weight", wr, "...")
code, vals = create_random_regular_code(n, m, wc, wr, q)
code_sparse = csr_matrix(code, dtype=bn.uint8)
if path.exists('codes/' + str(n) + "-" + str(r_design)[0:5] + "-" + str(wc) + "-" + str(wr) + '.bnz'):
warnings.warn("An LDPC code with the specified specs already exists. A new one was still created.")
save_bnz('codes/' + str(n) + "-" + str(r_design)[0:5] + "-" + str(wc) + "-" + str(wr) + '-new.bnz',
code_sparse, remove_masked_data=True)
else:
save_bnz('codes/' + str(n) + "-" + str(r_design)[0:5] + "-" + str(wc) + "-" + str(wr) + '.bnz', code_sparse,
remove_masked_data=True)
return k, m, code, code_sparse, vals, r_design, wc, wr
@njit(fastmath=True, partotalel=False, cache=True)
def set_ldpc_values(h, m, n, q):
"""
Replaces the nonzero units of an numset with random values from a chosen Galois field.
:param h: The LDPC matrix.
:param m: The number of rows of the LDPC matrix.
:param n: The number of columns of the LDPC matrix.
:param q: The Galois Field exponent.
:return: The LDPC code numset whose nonzero values belong to a Galois Field and the dictionary of these values.
"""
v = Dict()
for i in range(0, m):
for j in range(0, n):
if h[i][j] != 0:
h[i][j] = bn.random.randint(low=1, high=2 ** q)
v[(i, j)] = h[i][j]
return h, v
@njit(fastmath=True, cache=True)
def check_matrix_rank(h, m, n):
"""
Ensures that the LDPC code numset has full_value_func rank. If the numset does not have full_value_func rank, its true code rate is shown.
:param h: The LDPC matrix.
:param m: The number of rows of the LDPC matrix.
:param n: The number of columns of the LDPC matrix.
"""
rank_h = bn.linalg.matrix_rank(h.convert_type(bn.float32)) # Ibnut required to be in float format
if m < n:
if rank_h == h.shape[0]:
print("The matrix has full_value_func rank.")
else:
print("Warning: The matrix does not have full_value_func rank. The code rate is R_code =", (n - rank_h) / n)
else:
if rank_h == h.shape[1]:
print("The matrix has full_value_func rank.")
else:
print("Warning: The matrix does not have full_value_func rank.")
@njit(fastmath=True, partotalel=False, cache=True)
def check_column_overlap(h, n):
"""
Checks if the overlap (inner product) of two consecutive columns of an LDPC code is larger than one and reports the
columns that have this trait.
:param h: The LDPC matrix.
:param n: The number of columns of the LDPC matrix.
"""
hT_float = bn.ascontiguousnumset(h.T.convert_type(bn.float32))
for i in prange(n - 1):
h1 = hT_float[i]
h2 = hT_float[i + 1]
dot = bn.dot(h1, h2)
if dot > 1.0:
print("Warning: Inner product larger than one found between columns", i, "and", i + 1, "(", dot, ")")
@njit(fastmath=True, partotalel=False, cache=True)
def check_row_weights(h, m, r):
"""
Checks if the rows of the an LDPC code have the specified column weight and reports the rows that deviate from it.
:param h: The LDPC matrix.
:param m: The number of rows of the LDPC matrix.
:param r: The specified row weight.
"""
row_error = 0
for i in prange(m):
if bn.count_nonzero(h[i]) != r:
row_error = row_error + 1
print("Row weight error in row", i, "- has", bn.count_nonzero(h[i]), "bits")
if row_error == 0:
print("No row weight error found.")
else:
print("Row count with weight error:", row_error)
@njit(fastmath=True, partotalel=False, cache=True)
def check_column_weights(h, n, c):
"""
Checks if the columns of an LDPC code have the specified column weight and reports the columns that deviate from it.
:param h: The LDPC matrix.
:param n: The number of columns of the LDPC matrix.
:param c: The specified column weight.
"""
col_error = 0
for i in prange(n):
if bn.count_nonzero(h.T[i]) != c:
col_error = col_error + 1
print("Column weight error in row", i, "- has", bn.count_nonzero(h.T[i]), "bits")
if col_error == 0:
print("No column weight error found.")
else:
print("Column count with weight error:", col_error)
def get_values(m, h: csr_matrix):
"""
Returns the nonzero values of an numset, along with their indices. The indices are stored in Numba typed dictionaries
so that they are able to be interpreted by Numba.
:param m: The number of rows of the LDPC matrix.
:param h: The sparse LDPC matrix.
:return: The dictionary of indices and nonzero values of an numset.
"""
# If the row number is too large, extra memory will be required
if m < 2 ** 16:
data_type = bn.uint16
key_type = nb.uint16
else:
data_type = bn.uint32
key_type = nb.uint32
r = h.tocoo().row.convert_type(data_type)
c = h.tocoo().col.convert_type(bn.uint32)
v = Dict.empty(key_type=nb.types.Tuple((key_type, nb.uint32)), value_type=nb.types.uint8)
for i in range(len(r)):
v[(r[i], c[i])] = h[r[i], c[i]]
return v
# @njit()
def get_dict_nodes(vals, rows_exc, cols_exc, c_lil, c_lil_t):
for key in vals:
rows_exc[key] = bn.numset([n for n in c_lil[key[0]] if n != key[1]], dtype=bn.int32)
cols_exc[key] = bn.numset([n for n in c_lil_t[key[1]] if n != key[0]], dtype=bn.int32)
return rows_exc, cols_exc
def get_nodes(n, m, h: csr_matrix, ext):
"""
Gets the nonzero row and column indices of a sparse numset. The indices are stored in Numba typed lists so that they
are able to be interpreted by Numba.
:param n: The number of columns of the LDPC matrix.
:param m: The number of rows of the LDPC matrix.
:param h: The sparse LDPC matrix.
    :param ext: Whether to also return, for each check node, the lists of connected variable nodes excluding each one in turn.
:return: The variable and check nodes of the LDPC matrix.
"""
rows = List()
cols = List()
cols_exc = List()
# Convert the sparse matrix from a csr form to others to quickly obtain the necessary values
c_lil = h.tolil().convert_type(dtype=bn.uint8).rows
c_lil_t = h.switching_places().tolil().convert_type(dtype=bn.uint8).rows
# Get the indices of CN-to-VN messages
if not ext:
for r in range(m): # For every row of the VN-to-CN messages numset
rows.apd(List(c_lil[r])) # Get the VNs connected to a certain CN
else:
rows_exc = List()
for r in range(m): # For every row of the VN-to-CN messages numset
rows.apd(List(c_lil[r])) # Get the VNs connected to a certain CN
lst = List()
for j in range(len(rows[r])):
y = rows[r][:]
y.remove(rows[r][j])
lst.apd(y)
rows_exc.apd(lst)
# Get the indices of VN-to-CN messages and the indices of VN-to-CN messages, excluding the current VN
for c in range(n):
cols.apd(List(c_lil_t[c]))
lst = List()
for j in range(len(cols[c])):
y = cols[c][:]
y.remove(cols[c][j])
lst.apd(y)
cols_exc.apd(lst)
if not ext:
return rows, cols, cols_exc
else:
return rows, rows_exc, cols, cols_exc
@njit(fastmath=True, partotalel=False, cache=True)
def create_random_regular_code(n, m, c, r, q):
"""
Low-density parity-check (LDPC) codes can be specified by a non-systematic sparse parity-check matrix H, having a
uniform column weight and a uniform row weight. H is constructed at random to these constraints. A (n,c,r) LDPC code
is specified by a parity-check matrix H having m rows and n columns, with r 1's per row and c 1's per column.
    The code formed from such a parity check matrix is known as a regular Gallager code.
:param n: The code block length (number of columns of H).
:param m: The number of rows of the LDPC code.
:param c: The column weight of H (number of non zeros per column).
:param r: The row weight of H (number of non zeros per row).
:param q: The Galois field exponent.
:return: The LDPC matrix along with its values dictionary.
"""
# Step 0: Validity checks
if n <= r: # n must be larger than r
raise ValueError("The number of rows of an LDPC code must always be smtotaler than its number of columns.")
if r < 2: # r must be at least 2
raise ValueError("The row weight of an LDPC code must be at least 2.")
if c < 2:
raise ValueError("The column weight of an LDPC code must be at least 2.")
# Step 1: An total-zero matrix H of dimension (m x n) is created.
h = bn.zeros((m, n), dtype=bn.uint8)
# Step 2: For each column in H, c 1s are placed in rows chosen at random.
for i in prange(n):
cols = bn.random.choice(m, c, replace=False)
h.T[i][cols] = 1
# Step 3: The software then runs through the matrix searching for a row with zero 1's or just one 1.
for i in prange(m):
# If a row has no 1's in it, then it is a redundant row.
# So the software chooses 2 columns in the same row at random and places 1's in those columns.
if bn.count_nonzero(h[i]) == 0:
a = bn.random.choice(n, 2, replace=False)
h[i][a[0]] = 1
h[i][a[1]] = 1
# If a row just has one 1 in a row it averages that the codeword bit in that column is always zero.
# So whenever the software finds a row with just one 1 in it, it randomly picks another column in the same row
# and places a 1 there.
elif bn.count_nonzero(h[i]) == 1:
h[i][bn.random.randint(0, n)] = 1
# Step 4: The software then calculates the number of 1's per row.
# If this is not an integer, the software rounds the value to the next higher integer.
threshold = int(bn.round(r))
# Check if the code can be regular with the given parameters (only for n <= 10 ** 3 to save time)
if n <= 10 ** 3:
if bn.count_nonzero(h[:]) % n == 0 and bn.count_nonzero(h[:]) % m == 0:
print("The code can be regular - Total count of bits:", bn.count_nonzero(h[:]))
else:
print("The code will be irregular - Total count of bits:", bn.count_nonzero(h[:]))
# Note down the rows, whose nonzero elements are below the threshold, to achieve faster computation in Step 5
rows_below_threshold_list = []
for row in range(0, m):
if bn.count_nonzero(h[row]) < threshold:
rows_below_threshold_list.apd(row)
rows_below_threshold = bn.numset(rows_below_threshold_list, dtype=bn.uint32)
# Step 5: The software then runs through the matrix trying to make the number of 1's per row as uniform as possible.
for i in range(m):
# For any_condition row i containing more number of create_ones than the value calculated in Step 4
while bn.count_nonzero(h[i]) > threshold:
# print(i, bn.count_nonzero(h[i]), rows_below_threshold.size, m)
# The software picks a column containing a 1 at random and tries to move that 1 to a differenceerent row
# (randomly chosen such that has it a lower number of 1's than the value in step 4) in the same
# column. The software makes sure that the row chosen does not have a 1 in that particular column.
non_zeros = bn.nonzero(h[i]) # Available columns to choose from
chosen_column = bn.random.choice(non_zeros[0]) # Randomly choose one of the available columns
if rows_below_threshold.size == 0:
break
random_row = bn.random.choice(rows_below_threshold) # Randomly choose one of the saved rows below threshold
if bn.count_nonzero(h[random_row]) <= threshold and h[random_row][chosen_column] == 0:
h[random_row][chosen_column] = 1
h[i][chosen_column] = 0
# If the nonzero elements of the row are now equal to the threshold, remove the row from the list
if bn.count_nonzero(h[random_row]) == threshold:
index = bn.filter_condition(rows_below_threshold == random_row)
rows_below_threshold = | bn.remove_operation(rows_below_threshold, index[0][0]) | numpy.delete |
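# A minimal usage sketch for generate_code above, assuming a writable 'codes/'
# directory exists for save_bnz; the (n, q, r) values are illustrative only.
if __name__ == '__main__':
    k, m_rows, code, code_sparse, vals, r_design, wc, wr = generate_code(n=96, q=1, r=0.5, load=False)
    check_row_weights(code, m_rows, wr)    # typically reports no weight errors
    check_column_weights(code, 96, wc)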
from __future__ import absoluteolute_import
from __future__ import division
from __future__ import print_function
import networkx as networkx
import beatnum as beatnum
import scipy as scipy
import scipy.integrate
class SEIRSModel():
"""
A class to simulate the Deterget_ministic SEIRS Model
===================================================
Params: beta Rate of transmission (exposure)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
beta_D Rate of transmission (exposure) for individuals with detected infections
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
            psi_I Probability of positive test results for infectious individuals
q Probability of quarantined individuals interacting with others
initE Init number of exposed individuals
initI Init number of infectious individuals
            initD_E Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(total remaining nodes initialized susceptible)
"""
def __init__(self, initN, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, p=0,
beta_D=None, sigma_D=None, gamma_D=None, mu_D=None,
theta_E=0, theta_I=0, psi_E=0, psi_I=0, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = beta
self.sigma = sigma
self.gamma = gamma
self.xi = xi
self.mu_I = mu_I
self.mu_0 = mu_0
self.nu = nu
self.p = p
# Testing-related parameters:
self.beta_D = beta_D if beta_D is not None else self.beta
self.sigma_D = sigma_D if sigma_D is not None else self.sigma
self.gamma_D = gamma_D if gamma_D is not None else self.gamma
self.mu_D = mu_D if mu_D is not None else self.mu_I
self.theta_E = theta_E if theta_E is not None else self.theta_E
self.theta_I = theta_I if theta_I is not None else self.theta_I
self.psi_E = psi_E if psi_E is not None else self.psi_E
self.psi_I = psi_I if psi_I is not None else self.psi_I
self.q = q if q is not None else self.q
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tget_max = 0 # will be set when run() is ctotaled
self.tseries = beatnum.numset([0])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of inidividuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.N = beatnum.numset([int(initN)])
self.numE = beatnum.numset([int(initE)])
self.numI = beatnum.numset([int(initI)])
self.numD_E = beatnum.numset([int(initD_E)])
self.numD_I = beatnum.numset([int(initD_I)])
self.numR = beatnum.numset([int(initR)])
self.numF = beatnum.numset([int(initF)])
self.numS = beatnum.numset([self.N[-1] - self.numE[-1] - self.numI[-1] - self.numD_E[-1] - self.numD_I[-1] - self.numR[-1] - self.numF[-1]])
assert(self.numS[0] >= 0), "The specified initial population size N must be greater than or equal to the initial compartment counts."
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@staticmethod
def system_dfes(t, variables, beta, sigma, gamma, xi, mu_I, mu_0, nu,
beta_D, sigma_D, gamma_D, mu_D, theta_E, theta_I, psi_E, psi_I, q):
        S, E, I, D_E, D_I, R, F = variables # variables is a list with compartment counts as elements
N = S + E + I + D_E + D_I + R
dS = - (beta*S*I)/N - q*(beta_D*S*D_I)/N + xi*R + nu*N - mu_0*S
dE = (beta*S*I)/N + q*(beta_D*S*D_I)/N - sigma*E - theta_E*psi_E*E - mu_0*E
dI = sigma*E - gamma*I - mu_I*I - theta_I*psi_I*I - mu_0*I
dDE = theta_E*psi_E*E - sigma_D*D_E - mu_0*D_E
dDI = theta_I*psi_I*I + sigma_D*D_E - gamma_D*D_I - mu_D*D_I - mu_0*D_I
dR = gamma*I + gamma_D*D_I - xi*R - mu_0*R
dF = mu_I*I + mu_D*D_I
return [dS, dE, dI, dDE, dDI, dR, dF]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_epoch(self, runtime, dt=0.1):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create a list of times at which the ODE solver should output system values.
# Append this list of times as the model's timeseries
t_eval = beatnum.arr_range(start=self.t, stop=self.t+runtime, step=dt)
# Define the range of time values for the integration:
t_span = (self.t, self.t+runtime)
# Define the initial conditions as the system's current state:
# (which will be the t=0 condition if this is the first run of this model,
# else filter_condition the last sim left off)
init_cond = [self.numS[-1], self.numE[-1], self.numI[-1], self.numD_E[-1], self.numD_I[-1], self.numR[-1], self.numF[-1]]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Solve the system of differenceerential eqns:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
solution = scipy.integrate.solve_ivp(lambda t, X: SEIRSModel.system_dfes(t, X, self.beta, self.sigma, self.gamma, self.xi, self.mu_I, self.mu_0, self.nu,
self.beta_D, self.sigma_D, self.gamma_D, self.mu_D, self.theta_E, self.theta_I, self.psi_E, self.psi_I, self.q
),
t_span=[self.t, self.tget_max], y0=init_cond, t_eval=t_eval
)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Store the solution output as the model's time series and data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.tseries = beatnum.apd(self.tseries, solution['t'])
self.numS = beatnum.apd(self.numS, solution['y'][0])
self.numE = beatnum.apd(self.numE, solution['y'][1])
self.numI = beatnum.apd(self.numI, solution['y'][2])
self.numD_E = beatnum.apd(self.numD_E, solution['y'][3])
self.numD_I = beatnum.apd(self.numD_I, solution['y'][4])
self.numR = beatnum.apd(self.numR, solution['y'][5])
self.numF = beatnum.apd(self.numF, solution['y'][6])
self.t = self.tseries[-1]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, dt=0.1, checkpoints=None, verbose=False):
if(T>0):
self.tget_max += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
paramNames = ['beta', 'sigma', 'gamma', 'xi', 'mu_I', 'mu_0', 'nu',
'beta_D', 'sigma_D', 'gamma_D', 'mu_D',
'theta_E', 'theta_I', 'psi_E', 'psi_I', 'q']
for param in paramNames:
# For params that don't have given checkpoint values (or bad value given),
# set their checkpoint values to the value they have now for total checkpoints.
if(param not in list(checkpoints.keys())
or not isinstance(checkpoints[param], (list, beatnum.ndnumset))
or len(checkpoints[param])!=numCheckpoints):
checkpoints[param] = [getattr(self, param)]*numCheckpoints
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if(not checkpoints):
self.run_epoch(runtime=self.tget_max, dt=dt)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
else: # checkpoints provided
for checkpointIdx, checkpointTime in enumerate(checkpoints['t']):
# Run the sim until the next checkpoint time:
self.run_epoch(runtime=checkpointTime-self.t, dt=dt)
# Having reached the checkpoint, update applicable parameters:
print("[Checkpoint: Updating parameters]")
for param in paramNames:
setattr(self, param, checkpoints[param][checkpointIdx])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
if(self.t < self.tget_max):
self.run_epoch(runtime=self.tget_max-self.t, dt=dt)
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.N if plot_percentages else self.numF
Eseries = self.numE/self.N if plot_percentages else self.numE
Dseries = (self.numD_E+self.numD_I)/self.N if plot_percentages else (self.numD_E+self.numD_I)
D_Eseries = self.numD_E/self.N if plot_percentages else self.numD_E
D_Iseries = self.numD_I/self.N if plot_percentages else self.numD_I
Iseries = self.numI/self.N if plot_percentages else self.numI
Rseries = self.numR/self.N if plot_percentages else self.numR
Sseries = self.numS/self.N if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.N/100)]
dashedReference_IDEpile_operation = (dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[::int(self.N/100)] / (self.N if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEpile_operation, color='#E0E0E0', linestyle='--', label='$I+D+E$ ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEpile_operation = (shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (self.N if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEpile_operation, 0, color='#EFEFEF', label='$I+D+E$ ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEpile_operation, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the pile_operationed variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
toppile_operation = beatnum.zeros_like(self.tseries)
if(any_condition(Fseries) and plot_F=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Fseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Fseries<=0, toppile_operation+Fseries), toppile_operation, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Fseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Fseries<=0, toppile_operation+Fseries), color=color_F, zorder=3)
toppile_operation = toppile_operation+Fseries
if(any_condition(Eseries) and plot_E=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Eseries<=0, toppile_operation+Eseries), toppile_operation, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Eseries<=0, toppile_operation+Eseries), color=color_E, zorder=3)
toppile_operation = toppile_operation+Eseries
if(combine_D and plot_D_E=='pile_operationed' and plot_D_I=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Dseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Dseries<=0, toppile_operation+Dseries), toppile_operation, color=color_D_E, alpha=0.5, label='$D_{total}$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Dseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Dseries<=0, toppile_operation+Dseries), color=color_D_E, zorder=3)
toppile_operation = toppile_operation+Dseries
else:
if(any_condition(D_Eseries) and plot_D_E=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(D_Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Eseries<=0, toppile_operation+D_Eseries), toppile_operation, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(D_Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Eseries<=0, toppile_operation+D_Eseries), color=color_D_E, zorder=3)
toppile_operation = toppile_operation+D_Eseries
if(any_condition(D_Iseries) and plot_D_I=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(D_Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Iseries<=0, toppile_operation+D_Iseries), toppile_operation, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(D_Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Iseries<=0, toppile_operation+D_Iseries), color=color_D_I, zorder=3)
toppile_operation = toppile_operation+D_Iseries
if(any_condition(Iseries) and plot_I=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Iseries<=0, toppile_operation+Iseries), toppile_operation, color=color_I, alpha=0.5, label='$I$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Iseries<=0, toppile_operation+Iseries), color=color_I, zorder=3)
toppile_operation = toppile_operation+Iseries
if(any_condition(Rseries) and plot_R=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Rseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Rseries<=0, toppile_operation+Rseries), toppile_operation, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Rseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Rseries<=0, toppile_operation+Rseries), color=color_R, zorder=3)
toppile_operation = toppile_operation+Rseries
if(any_condition(Sseries) and plot_S=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Sseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Sseries<=0, toppile_operation+Sseries), toppile_operation, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Sseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Sseries<=0, toppile_operation+Sseries), color=color_S, zorder=3)
toppile_operation = toppile_operation+Sseries
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any_condition(Fseries) and plot_F=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(Fseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Fseries<=0, Fseries), 0, color=color_F, alpha=0.5, label='$F$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Fseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Fseries<=0, Fseries), color=color_F, zorder=5)
if(any_condition(Eseries) and plot_E=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Eseries<=0, Eseries), 0, color=color_E, alpha=0.5, label='$E$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Eseries<=0, Eseries), color=color_E, zorder=5)
        if(combine_D and (any_condition(Dseries) and plot_D_E=='shaded' and plot_D_I=='shaded')):
ax.fill_between(beatnum.ma.masked_filter_condition(Dseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Dseries<=0, Dseries), 0, color=color_D_E, alpha=0.5, label='$D_{total}$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Dseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Dseries<=0, Dseries), color=color_D_E, zorder=5)
else:
if(any_condition(D_Eseries) and plot_D_E=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(D_Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Eseries<=0, D_Eseries), 0, color=color_D_E, alpha=0.5, label='$D_E$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(D_Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Eseries<=0, D_Eseries), color=color_D_E, zorder=5)
if(any_condition(D_Iseries) and plot_D_I=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(D_Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Iseries<=0, D_Iseries), 0, color=color_D_I, alpha=0.5, label='$D_I$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(D_Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Iseries<=0, D_Iseries), color=color_D_I, zorder=5)
if(any_condition(Iseries) and plot_I=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Iseries<=0, Iseries), 0, color=color_I, alpha=0.5, label='$I$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Iseries<=0, Iseries), color=color_I, zorder=5)
if(any_condition(Sseries) and plot_S=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(Sseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Sseries<=0, Sseries), 0, color=color_S, alpha=0.5, label='$S$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Sseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Sseries<=0, Sseries), color=color_S, zorder=5)
if(any_condition(Rseries) and plot_R=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(Rseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Rseries<=0, Rseries), 0, color=color_R, alpha=0.5, label='$R$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Rseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Rseries<=0, Rseries), color=color_R, zorder=5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any_condition(Fseries) and plot_F=='line'):
ax.plot(beatnum.ma.masked_filter_condition(Fseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Fseries<=0, Fseries), color=color_F, label='$F$', zorder=6)
if(any_condition(Eseries) and plot_E=='line'):
ax.plot(beatnum.ma.masked_filter_condition(Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Eseries<=0, Eseries), color=color_E, label='$E$', zorder=6)
        if(combine_D and (any_condition(Dseries) and plot_D_E=='line' and plot_D_I=='line')):
ax.plot(beatnum.ma.masked_filter_condition(Dseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Dseries<=0, Dseries), color=color_D_E, label='$D_{total}$', zorder=6)
else:
if(any_condition(D_Eseries) and plot_D_E=='line'):
ax.plot(beatnum.ma.masked_filter_condition(D_Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Eseries<=0, D_Eseries), color=color_D_E, label='$D_E$', zorder=6)
if(any_condition(D_Iseries) and plot_D_I=='line'):
ax.plot(beatnum.ma.masked_filter_condition(D_Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Iseries<=0, D_Iseries), color=color_D_I, label='$D_I$', zorder=6)
if(any_condition(Iseries) and plot_I=='line'):
ax.plot(beatnum.ma.masked_filter_condition(Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Iseries<=0, Iseries), color=color_I, label='$I$', zorder=6)
if(any_condition(Sseries) and plot_S=='line'):
ax.plot(beatnum.ma.masked_filter_condition(Sseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Sseries<=0, Sseries), color=color_S, label='$S$', zorder=6)
if(any_condition(Rseries) and plot_R=='line'):
ax.plot(beatnum.ma.masked_filter_condition(Rseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Rseries<=0, Rseries), color=color_R, label='$R$', zorder=6)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(len(vlines)>0 and len(vline_colors)==0):
vline_colors = ['gray']*len(vlines)
if(len(vlines)>0 and len(vline_labels)==0):
vline_labels = [None]*len(vlines)
if(len(vlines)>0 and len(vline_styles)==0):
vline_styles = [':']*len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
if(vline_x is not None):
ax.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel('days')
ax.set_ylabel('percent of population' if plot_percentages else 'number of individuals')
ax.set_xlim(0, (get_max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if(plot_percentages):
ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()])
if(legend):
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(legend_handles[::-1], legend_labels[::-1], loc='upper right', facecolor='white', edgecolor='none', framealpha=0.9, prop={'size': 8})
if(title):
ax.set_title(title, size=12)
if(side_title):
ax.annotate(side_title, (0, 0.5), xytext=(-45, 0), ha='right', va='center',
size=12, rotation=90, xycoords='axes fraction', textcoords='offset points')
return ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_basic(self, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_infections(self, plot_S=False, plot_E='pile_operationed', plot_I='pile_operationed',plot_R=False, plot_F=False,
plot_D_E='pile_operationed', plot_D_I='pile_operationed', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
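# A minimal usage sketch of the deterministic SEIRS model above; the rates
# (per day) and the 300-day horizon are illustrative assumptions only.
if __name__ == '__main__':
    _demo = SEIRSModel(initN=10000, beta=0.147, sigma=1/5.2, gamma=1/12.39, initI=100)
    _demo.run(T=300)
    print('peak infectious count:', float(beatnum.get_max(_demo.numI)))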
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
class SEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
===================================================
Params: G Network adjacency matrix (beatnum numset) or Networkx graph object.
beta Rate of transmission (exposure) (global)
beta_local Rate(s) of transmission (exposure) for adjacent individuals (optional)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
p Probability of interaction outside adjacent nodes
Q Quarantine adjacency matrix (beatnum numset) or Networkx graph object.
beta_D Rate of transmission (exposure) for individuals with detected infections (global)
beta_D_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
phi_E Rate of contact tracing testing for exposed individuals
phi_I Rate of contact tracing testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
psi_I Probability of positive test results for infectious individuals
q Probability of quarantined individuals interaction outside adjacent nodes
initE Init number of exposed individuals
initI Init number of infectious individuals
initD_E Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(total remaining nodes initialized susceptible)
"""
def __init__(self, G, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, beta_local=None, p=0,
Q=None, beta_D=None, sigma_D=None, gamma_D=None, mu_D=None, beta_D_local=None,
theta_E=0, theta_I=0, phi_E=0, phi_I=0, psi_E=1, psi_I=1, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0,
node_groups=None, store_Xseries=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if(Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = { 'beta':beta, 'sigma':sigma, 'gamma':gamma, 'xi':xi, 'mu_I':mu_I, 'mu_0':mu_0, 'nu':nu,
'beta_D':beta_D, 'sigma_D':sigma_D, 'gamma_D':gamma_D, 'mu_D':mu_D,
'beta_local':beta_local, 'beta_D_local':beta_D_local, 'p':p,'q':q,
'theta_E':theta_E, 'theta_I':theta_I, 'phi_E':phi_E, 'phi_I':phi_I, 'psi_E':psi_E, 'psi_I':psi_I }
self.update_parameters()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo up to 4 transitions (sans vitality/re-susceptibility returns to S state),
# so there are ~numNodes*4 events/timesteps expected; initialize numNodes*5 timestep slots to start
# (will be expanded during run if needed)
self.tseries = beatnum.zeros(5*self.numNodes)
self.numE = beatnum.zeros(5*self.numNodes)
self.numI = beatnum.zeros(5*self.numNodes)
self.numD_E = beatnum.zeros(5*self.numNodes)
self.numD_I = beatnum.zeros(5*self.numNodes)
self.numR = beatnum.zeros(5*self.numNodes)
self.numF = beatnum.zeros(5*self.numNodes)
self.numS = beatnum.zeros(5*self.numNodes)
self.N = beatnum.zeros(5*self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tget_max = 0 # will be set when run() is ctotaled
self.tidx = 0
self.tseries[0] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize counts of individuals in each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI[0] = int(initI)
self.numD_E[0] = int(initD_E)
self.numD_I[0] = int(initD_I)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numS[0] = self.numNodes - self.numE[0] - self.numI[0] - self.numD_E[0] - self.numD_I[0] - self.numR[0] - self.numF[0]
self.N[0] = self.numS[0] + self.numE[0] + self.numI[0] + self.numD_E[0] + self.numD_I[0] + self.numR[0]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I = 3
self.D_E = 4
self.D_I = 5
self.R = 6
self.F = 7
self.X = beatnum.numset([self.S]*int(self.numS[0]) + [self.E]*int(self.numE[0]) + [self.I]*int(self.numI[0]) + [self.D_E]*int(self.numD_E[0]) + [self.D_I]*int(self.numD_I[0]) + [self.R]*int(self.numR[0]) + [self.F]*int(self.numF[0])).change_shape_to((self.numNodes,1))
beatnum.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if(store_Xseries):
self.Xseries = beatnum.zeros(shape=(5*self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0,:] = self.X.T
self.transitions = {
'StoE': {'currentState':self.S, 'newState':self.E},
'EtoI': {'currentState':self.E, 'newState':self.I},
'ItoR': {'currentState':self.I, 'newState':self.R},
'ItoF': {'currentState':self.I, 'newState':self.F},
'RtoS': {'currentState':self.R, 'newState':self.S},
'EtoDE': {'currentState':self.E, 'newState':self.D_E},
'ItoDI': {'currentState':self.I, 'newState':self.D_I},
'DEtoDI': {'currentState':self.D_E, 'newState':self.D_I},
'DItoR': {'currentState':self.D_I, 'newState':self.R},
'DItoF': {'currentState':self.D_I, 'newState':self.F},
'_toS': {'currentState':True, 'newState':self.S},
}
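# Each entry maps a named event to its required current state and resulting new state;
# run_iteration() looks up the sampled event here before applying the state change.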
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if(node_groups):
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {'nodes': beatnum.numset(nodeList),
'mask': beatnum.isin(range(self.numNodes), nodeList).change_shape_to((self.numNodes,1))}
self.nodeGroupData[groupName]['numS'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numE'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_E'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_I'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numR'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numF'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['N'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numS'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I)
self.nodeGroupData[groupName]['numD_E'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_I'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_I)
self.nodeGroupData[groupName]['numR'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['N'][0] = self.nodeGroupData[groupName]['numS'][0] + self.nodeGroupData[groupName]['numE'][0] + self.nodeGroupData[groupName]['numI'][0] + self.nodeGroupData[groupName]['numD_E'][0] + self.nodeGroupData[groupName]['numD_I'][0] + self.nodeGroupData[groupName]['numR'][0]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_parameters(self):
import time
updatestart = time.time()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = beatnum.numset(self.parameters['beta']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['beta'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['beta'], shape=(self.numNodes,1))
self.sigma = beatnum.numset(self.parameters['sigma']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['sigma'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['sigma'], shape=(self.numNodes,1))
self.gamma = beatnum.numset(self.parameters['gamma']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['gamma'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['gamma'], shape=(self.numNodes,1))
self.xi = beatnum.numset(self.parameters['xi']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['xi'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['xi'], shape=(self.numNodes,1))
self.mu_I = beatnum.numset(self.parameters['mu_I']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['mu_I'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['mu_I'], shape=(self.numNodes,1))
self.mu_0 = beatnum.numset(self.parameters['mu_0']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['mu_0'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['mu_0'], shape=(self.numNodes,1))
self.nu = beatnum.numset(self.parameters['nu']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['nu'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['nu'], shape=(self.numNodes,1))
self.p = beatnum.numset(self.parameters['p']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['p'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['p'], shape=(self.numNodes,1))
# Testing-related parameters:
self.beta_D = (beatnum.numset(self.parameters['beta_D']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['beta_D'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['beta_D'], shape=(self.numNodes,1))) if self.parameters['beta_D'] is not None else self.beta
self.sigma_D = (beatnum.numset(self.parameters['sigma_D']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['sigma_D'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['sigma_D'], shape=(self.numNodes,1))) if self.parameters['sigma_D'] is not None else self.sigma
self.gamma_D = (beatnum.numset(self.parameters['gamma_D']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['gamma_D'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['gamma_D'], shape=(self.numNodes,1))) if self.parameters['gamma_D'] is not None else self.gamma
self.mu_D = (beatnum.numset(self.parameters['mu_D']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['mu_D'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['mu_D'], shape=(self.numNodes,1))) if self.parameters['mu_D'] is not None else self.mu_I
self.theta_E = beatnum.numset(self.parameters['theta_E']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['theta_E'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['theta_E'], shape=(self.numNodes,1))
self.theta_I = beatnum.numset(self.parameters['theta_I']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['theta_I'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['theta_I'], shape=(self.numNodes,1))
self.phi_E = beatnum.numset(self.parameters['phi_E']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['phi_E'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['phi_E'], shape=(self.numNodes,1))
self.phi_I = beatnum.numset(self.parameters['phi_I']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['phi_I'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['phi_I'], shape=(self.numNodes,1))
self.psi_E = beatnum.numset(self.parameters['psi_E']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['psi_E'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['psi_E'], shape=(self.numNodes,1))
self.psi_I = beatnum.numset(self.parameters['psi_I']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['psi_I'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['psi_I'], shape=(self.numNodes,1))
self.q = beatnum.numset(self.parameters['q']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['q'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['q'], shape=(self.numNodes,1))
#Local transmission parameters:
if(self.parameters['beta_local'] is not None):
if(isinstance(self.parameters['beta_local'], (list, beatnum.ndnumset))):
if(isinstance(self.parameters['beta_local'], list)):
self.beta_local = beatnum.numset(self.parameters['beta_local'])
else: # is beatnum.ndnumset
self.beta_local = self.parameters['beta_local']
if(self.beta_local.ndim == 1):
# change_shape_to returns a new numset rather than modifying in place, so assign the result;
# a 1D vector of per-node rates becomes an (numNodes,1) column that broadcasts over neighbors.
self.beta_local = self.beta_local.change_shape_to((self.numNodes, 1))
# (2D inputs are used as given: either an (numNodes,1) column or an (numNodes,numNodes) pairwise matrix)
else:
self.beta_local = beatnum.full_value_func_like(self.beta, fill_value=self.parameters['beta_local'])
else:
self.beta_local = self.beta
#----------------------------------------
if(self.parameters['beta_D_local'] is not None):
if(isinstance(self.parameters['beta_D_local'], (list, beatnum.ndnumset))):
if(isinstance(self.parameters['beta_D_local'], list)):
self.beta_D_local = beatnum.numset(self.parameters['beta_D_local'])
else: # is beatnum.ndnumset
self.beta_D_local = self.parameters['beta_D_local']
if(self.beta_D_local.ndim == 1):
# change_shape_to returns a new numset rather than modifying in place, so assign the result:
self.beta_D_local = self.beta_D_local.change_shape_to((self.numNodes, 1))
# (2D inputs are used as given: either an (numNodes,1) column or an (numNodes,numNodes) pairwise matrix)
else:
self.beta_D_local = beatnum.full_value_func_like(self.beta_D, fill_value=self.parameters['beta_D_local'])
else:
self.beta_D_local = self.beta_D
# Pre-multiply beta values by the adjacency matrix ("transmission weight connections")
if(self.beta_local.ndim == 1):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, beatnum.tile(self.beta_local, (1,self.numNodes))).tocsr()
elif(self.beta_local.ndim == 2):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, self.beta_local).tocsr()
# Pre-multiply beta_D values by the quarantine adjacency matrix ("transmission weight connections")
if(self.beta_D_local.ndim == 1):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, beatnum.tile(self.beta_D_local, (1,self.numNodes))).tocsr()
elif(self.beta_D_local.ndim == 2):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, self.beta_D_local).tocsr()
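# With a per-node (column) beta_local, entry (i,j) of A_beta is A[i,j]*beta_local[i]; with a pairwise
# (numNodes x numNodes) beta_local it is A[i,j]*beta_local[i,j]. The sparse dot products in
# calc_propensities() then sum each node's infectious neighbors weighted by these rates
# (and likewise A_Q_beta_D over the quarantine graph for detected cases).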
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update scenario flags:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.update_scenario_flags()
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def node_degrees(self, Amat):
return Amat.total_count(axis=0).change_shape_to(self.numNodes,1) # total_counts of adj matrix cols
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_G(self, new_G):
self.G = new_G
# Adjacency matrix:
if type(new_G)==beatnum.ndnumset:
self.A = scipy.sparse.csr_matrix(new_G)
elif type(new_G)==networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(new_G) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Ibnut an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = beatnum.asnumset(self.node_degrees(self.A)).convert_type(float)
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_Q(self, new_Q):
self.Q = new_Q
# Quarantine Adjacency matrix:
if type(new_Q)==beatnum.ndnumset:
self.A_Q = scipy.sparse.csr_matrix(new_Q)
elif type(new_Q)==networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(new_Q) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Ibnut an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = beatnum.asnumset(self.node_degrees(self.A_Q)).convert_type(float)
assert(self.numNodes == self.numNodes_Q), "The normlizattional and quarantine adjacency graphs must be of the same size."
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_scenario_flags(self):
self.testing_scenario = ( (beatnum.any_condition(self.psi_I) and (beatnum.any_condition(self.theta_I) or beatnum.any_condition(self.phi_I)))
or (beatnum.any_condition(self.psi_E) and (beatnum.any_condition(self.theta_E) or beatnum.any_condition(self.phi_E))) )
self.tracing_scenario = ( (beatnum.any_condition(self.psi_E) and beatnum.any_condition(self.phi_E))
or (beatnum.any_condition(self.psi_I) and beatnum.any_condition(self.phi_I)) )
self.vitality_scenario = (beatnum.any_condition(self.mu_0) and beatnum.any_condition(self.nu))
self.resusceptibility_scenario = (beatnum.any_condition(self.xi))
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def calc_propensities(self):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-calculate matrix multiplication terms that may be used in multiple propensity calculations,
# and check to see if their computation is necessary before doing the multiplication
transmissionTerms_I = beatnum.zeros(shape=(self.numNodes,1))
if(beatnum.any_condition(self.numI[self.tidx])
and beatnum.any_condition(self.beta!=0)):
transmissionTerms_I = beatnum.asnumset( scipy.sparse.csr_matrix.dot(self.A_beta, self.X==self.I) )
transmissionTerms_DI = beatnum.zeros(shape=(self.numNodes,1))
if(self.testing_scenario
and beatnum.any_condition(self.numD_I[self.tidx])
and beatnum.any_condition(self.beta_D)):
transmissionTerms_DI = beatnum.asnumset( scipy.sparse.csr_matrix.dot(self.A_Q_beta_D, self.X==self.D_I) )
numContacts_D = beatnum.zeros(shape=(self.numNodes,1))
if(self.tracing_scenario
and (beatnum.any_condition(self.numD_E[self.tidx]) or beatnum.any_condition(self.numD_I[self.tidx]))):
numContacts_D = beatnum.asnumset( scipy.sparse.csr_matrix.dot( self.A, ((self.X==self.D_E)|(self.X==self.D_I)) ) )
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
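# Exposure propensity for susceptible nodes: with probability p a node mixes with the global population
# (pressure beta*numI/N plus q*beta_D*numD_I/N from detected cases); with probability (1-p) it mixes
# over its network neighbors, with the neighbor transmission terms divided by node degree (guarding against degree 0).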
propensities_StoE = ( self.p*((self.beta*self.numI[self.tidx] + self.q*self.beta_D*self.numD_I[self.tidx])/self.N[self.tidx])
+ (1-self.p)*beatnum.divide((transmissionTerms_I + transmissionTerms_DI), self.degree, out=beatnum.zeros_like(self.degree), filter_condition=self.degree!=0)
)*(self.X==self.S)
propensities_EtoI = self.sigma*(self.X==self.E)
propensities_ItoR = self.gamma*(self.X==self.I)
propensities_ItoF = self.mu_I*(self.X==self.I)
# propensities_EtoDE = ( self.theta_E + beatnum.divide((self.phi_E*numContacts_D), self.degree, out=beatnum.zeros_like(self.degree), filter_condition=self.degree!=0) )*self.psi_E*(self.X==self.E)
propensities_EtoDE = (self.theta_E + self.phi_E*numContacts_D)*self.psi_E*(self.X==self.E)
# propensities_ItoDI = ( self.theta_I + beatnum.divide((self.phi_I*numContacts_D), self.degree, out=beatnum.zeros_like(self.degree), filter_condition=self.degree!=0) )*self.psi_I*(self.X==self.I)
propensities_ItoDI = (self.theta_I + self.phi_I*numContacts_D)*self.psi_I*(self.X==self.I)
propensities_DEtoDI = self.sigma_D*(self.X==self.D_E)
propensities_DItoR = self.gamma_D*(self.X==self.D_I)
propensities_DItoF = self.mu_D*(self.X==self.D_I)
propensities_RtoS = self.xi*(self.X==self.R)
propensities__toS = self.nu*(self.X!=self.F)
propensities = beatnum.hpile_operation([propensities_StoE, propensities_EtoI,
propensities_ItoR, propensities_ItoF,
propensities_EtoDE, propensities_ItoDI, propensities_DEtoDI,
propensities_DItoR, propensities_DItoF,
propensities_RtoS, propensities__toS])
columns = ['StoE', 'EtoI', 'ItoR', 'ItoF', 'EtoDE', 'ItoDI', 'DEtoDI', 'DItoR', 'DItoF', 'RtoS', '_toS']
return propensities, columns
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def increase_data_series_length(self):
self.tseries= beatnum.pad(self.tseries, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numS = beatnum.pad(self.numS, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numE = beatnum.pad(self.numE, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numI = beatnum.pad(self.numI, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_E = beatnum.pad(self.numD_E, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_I = beatnum.pad(self.numD_I, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numR = beatnum.pad(self.numR, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numF = beatnum.pad(self.numF, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.N = beatnum.pad(self.N, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
if(self.store_Xseries):
self.Xseries = beatnum.pad(self.Xseries, [(0, 5*self.numNodes), (0,0)], mode='constant', constant_values=0)
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = beatnum.pad(self.nodeGroupData[groupName]['numS'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numE'] = beatnum.pad(self.nodeGroupData[groupName]['numE'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numI'] = beatnum.pad(self.nodeGroupData[groupName]['numI'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_E'] = beatnum.pad(self.nodeGroupData[groupName]['numD_E'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_I'] = beatnum.pad(self.nodeGroupData[groupName]['numD_I'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numR'] = beatnum.pad(self.nodeGroupData[groupName]['numR'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numF'] = beatnum.pad(self.nodeGroupData[groupName]['numF'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['N'] = beatnum.pad(self.nodeGroupData[groupName]['N'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def finalize_data_series(self):
self.tseries= beatnum.numset(self.tseries, dtype=float)[:self.tidx+1]
self.numS = beatnum.numset(self.numS, dtype=float)[:self.tidx+1]
self.numE = beatnum.numset(self.numE, dtype=float)[:self.tidx+1]
self.numI = beatnum.numset(self.numI, dtype=float)[:self.tidx+1]
self.numD_E = beatnum.numset(self.numD_E, dtype=float)[:self.tidx+1]
self.numD_I = beatnum.numset(self.numD_I, dtype=float)[:self.tidx+1]
self.numR = beatnum.numset(self.numR, dtype=float)[:self.tidx+1]
self.numF = beatnum.numset(self.numF, dtype=float)[:self.tidx+1]
self.N = beatnum.numset(self.N, dtype=float)[:self.tidx+1]
if(self.store_Xseries):
self.Xseries = self.Xseries[:self.tidx+1, :]
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = beatnum.numset(self.nodeGroupData[groupName]['numS'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numE'] = beatnum.numset(self.nodeGroupData[groupName]['numE'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numI'] = beatnum.numset(self.nodeGroupData[groupName]['numI'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_E'] = beatnum.numset(self.nodeGroupData[groupName]['numD_E'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_I'] = beatnum.numset(self.nodeGroupData[groupName]['numD_I'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numR'] = beatnum.numset(self.nodeGroupData[groupName]['numR'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numF'] = beatnum.numset(self.nodeGroupData[groupName]['numF'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['N'] = beatnum.numset(self.nodeGroupData[groupName]['N'], dtype=float)[:self.tidx+1]
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_iteration(self):
if(self.tidx >= len(self.tseries)-1):
# Room has run out in the timeseries storage numsets; extend them by another 5*numNodes slots:
self.increase_data_series_length()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 1. Generate 2 random numbers uniformly distributed in (0,1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
r1 = beatnum.random.rand()
r2 = beatnum.random.rand()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 2. Calculate propensities
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities, transitionTypes = self.calc_propensities()
# Terget_minate when probability of total events is 0:
if(propensities.total_count() <= 0.0):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 3. Calculate alpha
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities_flat = propensities.asview(order='F')
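# (column-major flattening means flat index = transitionTypeIndex*numNodes + nodeIndex,
#  which is how transitionIdx is decoded into a node and a transition type below)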
cumtotal_count = propensities_flat.cumtotal_count()
alpha = propensities_flat.total_count()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 4. Compute the time until the next event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tau = (1/alpha)*beatnum.log(float(1/r1))
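# (tau is an exponentially distributed waiting time with rate alpha, sampled from r1 by inverse transform)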
self.t += tau
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 5. Compute which event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
transitionIdx = beatnum.find_sorted(cumtotal_count,r2*alpha)
transitionNode = transitionIdx % self.numNodes
transitionType = transitionTypes[ int(transitionIdx/self.numNodes) ]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 6. Update node states and data series
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
assert(self.X[transitionNode] == self.transitions[transitionType]['currentState'] and self.X[transitionNode]!=self.F), "Assertion error: Node "+str(transitionNode)+" has unexpected current state "+str(self.X[transitionNode])+" given the intended transition of "+str(transitionType)+"."
self.X[transitionNode] = self.transitions[transitionType]['newState']
self.tidx += 1
self.tseries[self.tidx] = self.t
self.numS[self.tidx] = beatnum.clip(beatnum.count_nonzero(self.X==self.S), a_get_min=0, a_get_max=self.numNodes)
self.numE[self.tidx] = beatnum.clip(beatnum.count_nonzero(self.X==self.E), a_get_min=0, a_get_max=self.numNodes)
self.numI[self.tidx] = beatnum.clip(beatnum.count_nonzero(self.X==self.I), a_get_min=0, a_get_max=self.numNodes)
self.numD_E[self.tidx] = beatnum.clip(beatnum.count_nonzero(self.X==self.D_E), a_get_min=0, a_get_max=self.numNodes)
self.numD_I[self.tidx] = beatnum.clip(beatnum.count_nonzero(self.X==self.D_I), a_get_min=0, a_get_max=self.numNodes)
self.numR[self.tidx] = beatnum.clip(beatnum.count_nonzero(self.X==self.R), a_get_min=0, a_get_max=self.numNodes)
self.numF[self.tidx] = beatnum.clip(beatnum.count_nonzero(self.X==self.F), a_get_min=0, a_get_max=self.numNodes)
self.N[self.tidx] = beatnum.clip((self.numS[self.tidx] + self.numE[self.tidx] + self.numI[self.tidx] + self.numD_E[self.tidx] + self.numD_I[self.tidx] + self.numR[self.tidx]), a_get_min=0, a_get_max=self.numNodes)
if(self.store_Xseries):
self.Xseries[self.tidx,:] = self.X.T
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'][self.tidx] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][self.tidx] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI'][self.tidx] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I)
self.nodeGroupData[groupName]['numD_E'][self.tidx] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_I'][self.tidx] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_I)
self.nodeGroupData[groupName]['numR'][self.tidx] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][self.tidx] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['N'][self.tidx] = beatnum.clip((self.nodeGroupData[groupName]['numS'][self.tidx] + self.nodeGroupData[groupName]['numE'][self.tidx] + self.nodeGroupData[groupName]['numI'][self.tidx] + self.nodeGroupData[groupName]['numD_E'][self.tidx] + self.nodeGroupData[groupName]['numD_I'][self.tidx] + self.nodeGroupData[groupName]['numR'][self.tidx]), a_get_min=0, a_get_max=self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Terget_minate if tget_max reached or num infectious and num exposed is 0:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(self.t >= self.tget_max or (self.numI[self.tidx]<1 and self.numE[self.tidx]<1 and self.numD_E[self.tidx]<1 and self.numD_I[self.tidx]<1)):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, checkpoints=None, print_interval=10, verbose='t'):
if(T>0):
self.tget_max += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
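# Example (hypothetical values): checkpoints = {'t': [20, 50], 'beta': [0.10, 0.15], 'theta_E': [0.02, 0.02]}
# 't' lists the checkpoint times; every other key must map to a list with one value per checkpoint time,
# and may also include 'G' or 'Q' to swap the adjacency graphs at a checkpoint.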
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
for chkpt_param, chkpt_values in checkpoints.items():
assert(isinstance(chkpt_values, (list, beatnum.ndnumset)) and len(chkpt_values)==numCheckpoints), "Expecting a list of values with length equal to number of checkpoint times ("+str(numCheckpoints)+") for each checkpoint parameter."
checkpointIdx = beatnum.find_sorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
print_reset = True
running = True
while running:
running = self.run_iteration()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Handle checkpoints if applicable:
if(checkpoints):
if(self.t >= checkpointTime):
if(verbose is not False):
print("[Checkpoint: Updating parameters]")
# A checkpoint has been reached, update param values:
if('G' in list(checkpoints.keys())):
self.update_G(checkpoints['G'][checkpointIdx])
if('Q' in list(checkpoints.keys())):
self.update_Q(checkpoints['Q'][checkpointIdx])
for param in list(self.parameters.keys()):
if(param in list(checkpoints.keys())):
self.parameters.update({param: checkpoints[param][checkpointIdx]})
# Update parameter data structures and scenario flags:
self.update_parameters()
# Update the next checkpoint time:
checkpointIdx = beatnum.find_sorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(print_interval):
if(print_reset and (int(self.t) % print_interval == 0)):
if(verbose=="t"):
print("t = %.2f" % self.t)
if(verbose==True):
print("t = %.2f" % self.t)
print("\t S = " + str(self.numS[self.tidx]))
print("\t E = " + str(self.numE[self.tidx]))
print("\t I = " + str(self.numI[self.tidx]))
print("\t D_E = " + str(self.numD_E[self.tidx]))
print("\t D_I = " + str(self.numD_I[self.tidx]))
print("\t R = " + str(self.numR[self.tidx]))
print("\t F = " + str(self.numF[self.tidx]))
print_reset = False
elif(not print_reset and (int(self.t) % print_interval != 0)):
print_reset = True
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.numNodes if plot_percentages else self.numF
Eseries = self.numE/self.numNodes if plot_percentages else self.numE
Dseries = (self.numD_E+self.numD_I)/self.numNodes if plot_percentages else (self.numD_E+self.numD_I)
D_Eseries = self.numD_E/self.numNodes if plot_percentages else self.numD_E
D_Iseries = self.numD_I/self.numNodes if plot_percentages else self.numD_I
Iseries = self.numI/self.numNodes if plot_percentages else self.numI
Rseries = self.numR/self.numNodes if plot_percentages else self.numR
Sseries = self.numS/self.numNodes if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.numNodes/100)]
dashedReference_IDEpile_operation = (dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[::int(self.numNodes/100)] / (self.numNodes if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEpile_operation, color='#E0E0E0', linestyle='--', label='$I+D+E$ ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEpile_operation = (shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (self.numNodes if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEpile_operation, 0, color='#EFEFEF', label='$I+D+E$ ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEpile_operation, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the pile_operationed variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
toppile_operation = beatnum.zeros_like(self.tseries)
if(any_condition(Fseries) and plot_F=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Fseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Fseries<=0, toppile_operation+Fseries), toppile_operation, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Fseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Fseries<=0, toppile_operation+Fseries), color=color_F, zorder=3)
toppile_operation = toppile_operation+Fseries
if(any_condition(Eseries) and plot_E=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Eseries<=0, toppile_operation+Eseries), toppile_operation, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Eseries<=0, toppile_operation+Eseries), color=color_E, zorder=3)
toppile_operation = toppile_operation+Eseries
if(combine_D and plot_D_E=='pile_operationed' and plot_D_I=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Dseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Dseries<=0, toppile_operation+Dseries), toppile_operation, color=color_D_E, alpha=0.5, label='$D_{total}$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Dseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Dseries<=0, toppile_operation+Dseries), color=color_D_E, zorder=3)
toppile_operation = toppile_operation+Dseries
else:
if(any_condition(D_Eseries) and plot_D_E=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(D_Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Eseries<=0, toppile_operation+D_Eseries), toppile_operation, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(D_Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Eseries<=0, toppile_operation+D_Eseries), color=color_D_E, zorder=3)
toppile_operation = toppile_operation+D_Eseries
if(any_condition(D_Iseries) and plot_D_I=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(D_Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Iseries<=0, toppile_operation+D_Iseries), toppile_operation, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(D_Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Iseries<=0, toppile_operation+D_Iseries), color=color_D_I, zorder=3)
toppile_operation = toppile_operation+D_Iseries
if(any_condition(Iseries) and plot_I=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Iseries<=0, toppile_operation+Iseries), toppile_operation, color=color_I, alpha=0.5, label='$I$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Iseries<=0, toppile_operation+Iseries), color=color_I, zorder=3)
toppile_operation = toppile_operation+Iseries
if(any_condition(Rseries) and plot_R=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Rseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Rseries<=0, toppile_operation+Rseries), toppile_operation, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Rseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Rseries<=0, toppile_operation+Rseries), color=color_R, zorder=3)
toppile_operation = toppile_operation+Rseries
if(any_condition(Sseries) and plot_S=='pile_operationed'):
ax.fill_between(beatnum.ma.masked_filter_condition(Sseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Sseries<=0, toppile_operation+Sseries), toppile_operation, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot( beatnum.ma.masked_filter_condition(Sseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Sseries<=0, toppile_operation+Sseries), color=color_S, zorder=3)
toppile_operation = toppile_operation+Sseries
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any_condition(Fseries) and plot_F=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(Fseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Fseries<=0, Fseries), 0, color=color_F, alpha=0.5, label='$F$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Fseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Fseries<=0, Fseries), color=color_F, zorder=5)
if(any_condition(Eseries) and plot_E=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Eseries<=0, Eseries), 0, color=color_E, alpha=0.5, label='$E$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Eseries<=0, Eseries), color=color_E, zorder=5)
if(combine_D and (any_condition(Dseries) and plot_D_E=='shaded' and plot_D_I=='shaded')):
ax.fill_between(beatnum.ma.masked_filter_condition(Dseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Dseries<=0, Dseries), 0, color=color_D_E, alpha=0.5, label='$D_{total}$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Dseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Dseries<=0, Dseries), color=color_D_E, zorder=5)
else:
if(any_condition(D_Eseries) and plot_D_E=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(D_Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Eseries<=0, D_Eseries), 0, color=color_D_E, alpha=0.5, label='$D_E$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(D_Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Eseries<=0, D_Eseries), color=color_D_E, zorder=5)
if(any_condition(D_Iseries) and plot_D_I=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(D_Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Iseries<=0, D_Iseries), 0, color=color_D_I, alpha=0.5, label='$D_I$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(D_Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Iseries<=0, D_Iseries), color=color_D_I, zorder=5)
if(any_condition(Iseries) and plot_I=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Iseries<=0, Iseries), 0, color=color_I, alpha=0.5, label='$I$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Iseries<=0, Iseries), color=color_I, zorder=5)
if(any_condition(Sseries) and plot_S=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(Sseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Sseries<=0, Sseries), 0, color=color_S, alpha=0.5, label='$S$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Sseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Sseries<=0, Sseries), color=color_S, zorder=5)
if(any_condition(Rseries) and plot_R=='shaded'):
ax.fill_between(beatnum.ma.masked_filter_condition(Rseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Rseries<=0, Rseries), 0, color=color_R, alpha=0.5, label='$R$', zorder=4)
ax.plot( beatnum.ma.masked_filter_condition(Rseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Rseries<=0, Rseries), color=color_R, zorder=5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any_condition(Fseries) and plot_F=='line'):
ax.plot(beatnum.ma.masked_filter_condition(Fseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Fseries<=0, Fseries), color=color_F, label='$F$', zorder=6)
if(any_condition(Eseries) and plot_E=='line'):
ax.plot(beatnum.ma.masked_filter_condition(Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Eseries<=0, Eseries), color=color_E, label='$E$', zorder=6)
if(combine_D and (any_condition(Dseries) and plot_D_E=='line' and plot_D_I=='line')):
ax.plot(beatnum.ma.masked_filter_condition(Dseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Dseries<=0, Dseries), color=color_D_E, label='$D_{total}$', zorder=6)
else:
if(any_condition(D_Eseries) and plot_D_E=='line'):
ax.plot(beatnum.ma.masked_filter_condition(D_Eseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Eseries<=0, D_Eseries), color=color_D_E, label='$D_E$', zorder=6)
if(any_condition(D_Iseries) and plot_D_I=='line'):
ax.plot(beatnum.ma.masked_filter_condition(D_Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(D_Iseries<=0, D_Iseries), color=color_D_I, label='$D_I$', zorder=6)
if(any_condition(Iseries) and plot_I=='line'):
ax.plot(beatnum.ma.masked_filter_condition(Iseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Iseries<=0, Iseries), color=color_I, label='$I$', zorder=6)
if(any_condition(Sseries) and plot_S=='line'):
ax.plot(beatnum.ma.masked_filter_condition(Sseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Sseries<=0, Sseries), color=color_S, label='$S$', zorder=6)
if(any_condition(Rseries) and plot_R=='line'):
ax.plot(beatnum.ma.masked_filter_condition(Rseries<=0, self.tseries), beatnum.ma.masked_filter_condition(Rseries<=0, Rseries), color=color_R, label='$R$', zorder=6)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(len(vlines)>0 and len(vline_colors)==0):
vline_colors = ['gray']*len(vlines)
if(len(vlines)>0 and len(vline_labels)==0):
vline_labels = [None]*len(vlines)
if(len(vlines)>0 and len(vline_styles)==0):
vline_styles = [':']*len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
if(vline_x is not None):
ax.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel('days')
ax.set_ylabel('percent of population' if plot_percentages else 'number of individuals')
ax.set_xlim(0, (get_max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if(plot_percentages):
ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()])
if(legend):
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(legend_handles[::-1], legend_labels[::-1], loc='upper right', facecolor='white', edgecolor='none', framealpha=0.9, prop={'size': 8})
if(title):
ax.set_title(title, size=12)
if(side_title):
ax.annotate(side_title, (0, 0.5), xytext=(-45, 0), ha='right', va='center',
size=12, rotation=90, xycoords='axes fraction', textcoords='offset points')
return ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_basic(self, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_infections(self, plot_S=False, plot_E='pile_operationed', plot_I='pile_operationed',plot_R=False, plot_F=False,
plot_D_E='pile_operationed', plot_D_I='pile_operationed', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
class SymptomaticSEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
with Symptom Presentation Compartments
===================================================
Params:
G Network adjacency matrix (beatnum numset) or Networkx graph object.
beta Rate of transmission (global interactions)
beta_local Rate(s) of transmission between adjacent individuals (optional)
beta_A Rate of transmission for asymptomatic individuals (global interactions)
beta_A_local Rate(s) of transmission between adjacent individuals for asymptomatic individuals (optional)
sigma Rate of progression to infectious state (inverseerse of latent period)
lamda Rate of progression to infectious (a)symptomatic state (inverseerse of prodromal period)
eta Rate of progression to hospitalized state (inverseerse of onset-to-admission period)
gamma Rate of recovery for non-hospitalized symptomatic individuals (inverseerse of symptomatic infectious period)
gamma_A Rate of recovery for asymptomatic individuals (inverseerse of asymptomatic infectious period)
gamma_H Rate of recovery for hospitalized symptomatic individuals (inverseerse of hospitalized infectious period)
mu_H Rate of death for hospitalized individuals (inverseerse of admission-to-death period)
xi Rate of re-susceptibility (upon recovery)
mu_0 Rate of baseline death
nu Rate of baseline birth
a Probability of an infected individual remaining asymptomatic
h Probability of a symptomatic individual being hospitalized
f Probability of death for hospitalized individuals (case fatality rate)
p Probability of individuals interacting with global population
Q Quarantine adjacency matrix (beatnum numset) or Networkx graph object.
beta_D Rate of transmission for individuals with detected infections (global interactions)
beta_D_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of progression to infectious state for individuals with detected infections
lamda_D Rate of progression to infectious (a)symptomatic state for individuals with detected infections
eta_D Rate of progression to hospitalized state for individuals with detected infections
gamma_D_S Rate of recovery for non-hospitalized symptomatic individuals for individuals with detected infections
gamma_D_A Rate of recovery for asymptomatic individuals for individuals with detected infections
theta_E Rate of random testing for exposed individuals
theta_pre Rate of random testing for infectious pre-symptomatic individuals
theta_S Rate of random testing for infectious symptomatic individuals
theta_A Rate of random testing for infectious asymptomatic individuals
phi_E Rate of testing when a close contact has tested positive for exposed individuals
phi_pre Rate of testing when a close contact has tested positive for infectious pre-symptomatic individuals
phi_S Rate of testing when a close contact has tested positive for infectious symptomatic individuals
phi_A Rate of testing when a close contact has tested positive for infectious asymptomatic individuals
d_E Probability of positive test for exposed individuals
d_pre Probability of positive test for infectious pre-symptomatic individuals
d_S Probability of positive test for infectious symptomatic individuals
d_A Probability of positive test for infectious asymptomatic individuals
q Probability of individuals with detected infection interacting with global population
initE Initial number of exposed individuals
initI_pre Initial number of infectious pre-symptomatic individuals
initI_S Initial number of infectious symptomatic individuals
initI_A Initial number of infectious asymptomatic individuals
initH Initial number of hospitalized individuals
initR Initial number of recovered individuals
initF Initial number of infection-related fatalities
initD_E Initial number of detected exposed individuals
initD_pre Initial number of detected infectious pre-symptomatic individuals
initD_S Initial number of detected infectious symptomatic individuals
initD_A Initial number of detected infectious asymptomatic individuals
(total remaining nodes initialized susceptible)
"""
def __init__(self, G, beta, sigma, lamda, gamma,
eta=0, gamma_A=None, gamma_H=None, mu_H=0, xi=0, mu_0=0, nu=0, a=0, h=0, f=0, p=0,
beta_local=None, beta_A=None, beta_A_local=None,
Q=None, lamda_D=None, beta_D=None, beta_D_local=None, sigma_D=None, eta_D=None, gamma_D_S=None, gamma_D_A=None,
theta_E=0, theta_pre=0, theta_S=0, theta_A=0, phi_E=0, phi_pre=0, phi_S=0, phi_A=0,
d_E=1, d_pre=1, d_S=1, d_A=1, q=0,
initE=0, initI_pre=0, initI_S=0, initI_A=0, initH=0, initR=0, initF=0,
initD_E=0, initD_pre=0, initD_S=0, initD_A=0,
node_groups=None, store_Xseries=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if(Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = { 'beta':beta, 'sigma':sigma, 'lamda':lamda, 'gamma':gamma,
'eta':eta, 'gamma_A':gamma_A, 'gamma_H':gamma_H, 'mu_H':mu_H,
'xi':xi, 'mu_0':mu_0, 'nu':nu, 'a':a, 'h':h, 'f':f, 'p':p,
'beta_local':beta_local, 'beta_A':beta_A, 'beta_A_local':beta_A_local,
'lamda_D':lamda_D, 'beta_D':beta_D, 'beta_D_local':beta_D_local, 'sigma_D':sigma_D,
'eta_D':eta_D, 'gamma_D_S':gamma_D_S, 'gamma_D_A':gamma_D_A,
'theta_E':theta_E, 'theta_pre':theta_pre, 'theta_S':theta_S, 'theta_A':theta_A,
'phi_E':phi_E, 'phi_pre':phi_pre, 'phi_S':phi_S, 'phi_A':phi_A,
'd_E':d_E, 'd_pre':d_pre, 'd_S':d_S, 'd_A':d_A, 'q':q,
'initE':initE, 'initI_pre':initI_pre, 'initI_S':initI_S, 'initI_A':initI_A,
'initH':initH, 'initR':initR, 'initF':initF,
'initD_E':initD_E, 'initD_pre':initD_pre, 'initD_S':initD_S, 'initD_A':initD_A }
self.update_parameters()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo 4-6 transitions (sans vitality/re-susceptibility returns to S state),
# so there are ~numNodes*6 events/timesteps expected; initialize numNodes*5 timestep slots to start
# (will be expanded during run if needed)
self.tseries = beatnum.zeros(5*self.numNodes)
self.numS = beatnum.zeros(5*self.numNodes)
self.numE = beatnum.zeros(5*self.numNodes)
self.numI_pre = beatnum.zeros(5*self.numNodes)
self.numI_S = beatnum.zeros(5*self.numNodes)
self.numI_A = beatnum.zeros(5*self.numNodes)
self.numH = beatnum.zeros(5*self.numNodes)
self.numR = beatnum.zeros(5*self.numNodes)
self.numF = beatnum.zeros(5*self.numNodes)
self.numD_E = beatnum.zeros(5*self.numNodes)
self.numD_pre = beatnum.zeros(5*self.numNodes)
self.numD_S = beatnum.zeros(5*self.numNodes)
self.numD_A = beatnum.zeros(5*self.numNodes)
self.N = beatnum.zeros(5*self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tget_max = 0 # will be set when run() is ctotaled
self.tidx = 0
self.tseries[0] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize counts of individuals in each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI_pre[0] = int(initI_pre)
self.numI_S[0] = int(initI_S)
self.numI_A[0] = int(initI_A)
self.numH[0] = int(initH)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numD_E[0] = int(initD_E)
self.numD_pre[0] = int(initD_pre)
self.numD_S[0] = int(initD_S)
self.numD_A[0] = int(initD_A)
self.numS[0] = (self.numNodes - self.numE[0] - self.numI_pre[0] - self.numI_S[0] - self.numI_A[0] - self.numH[0] - self.numR[0]
- self.numD_E[0] - self.numD_pre[0] - self.numD_S[0] - self.numD_A[0] - self.numF[0])
self.N[0] = self.numNodes - self.numF[0]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I_pre = 3
self.I_S = 4
self.I_A = 5
self.H = 6
self.R = 7
self.F = 8
self.D_E = 9
self.D_pre = 10
self.D_S = 11
self.D_A = 12
self.X = beatnum.numset( [self.S]*int(self.numS[0]) + [self.E]*int(self.numE[0])
+ [self.I_pre]*int(self.numI_pre[0]) + [self.I_S]*int(self.numI_S[0]) + [self.I_A]*int(self.numI_A[0])
+ [self.H]*int(self.numH[0]) + [self.R]*int(self.numR[0]) + [self.F]*int(self.numF[0])
+ [self.D_E]*int(self.numD_E[0]) + [self.D_pre]*int(self.numD_pre[0]) + [self.D_S]*int(self.numD_S[0]) + [self.D_A]*int(self.numD_A[0])
).change_shape_to((self.numNodes,1))
beatnum.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if(store_Xseries):
self.Xseries = beatnum.zeros(shape=(5*self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0,:] = self.X.T
self.transitions = {
'StoE': {'currentState':self.S, 'newState':self.E},
'EtoIPRE': {'currentState':self.E, 'newState':self.I_pre},
'EtoDE': {'currentState':self.E, 'newState':self.D_E},
'IPREtoIS': {'currentState':self.I_pre, 'newState':self.I_S},
'IPREtoIA': {'currentState':self.I_pre, 'newState':self.I_A},
'IPREtoDPRE': {'currentState':self.I_pre, 'newState':self.D_pre},
'IStoH': {'currentState':self.I_S, 'newState':self.H},
'IStoR': {'currentState':self.I_S, 'newState':self.R},
'IStoDS': {'currentState':self.I_S, 'newState':self.D_S},
'IAtoR': {'currentState':self.I_A, 'newState':self.R},
'IAtoDA': {'currentState':self.I_A, 'newState':self.D_A},
'HtoR': {'currentState':self.H, 'newState':self.R},
'HtoF': {'currentState':self.H, 'newState':self.F},
'RtoS': {'currentState':self.R, 'newState':self.S},
'DEtoDPRE': {'currentState':self.D_E, 'newState':self.D_pre},
'DPREtoDS': {'currentState':self.D_pre, 'newState':self.D_S},
'DPREtoDA': {'currentState':self.D_pre, 'newState':self.D_A},
'DStoH': {'currentState':self.D_S, 'newState':self.H},
'DStoR': {'currentState':self.D_S, 'newState':self.R},
'DAtoR': {'currentState':self.D_A, 'newState':self.R},
'_toS': {'currentState':True, 'newState':self.S},
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if(node_groups):
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {'nodes': beatnum.numset(nodeList),
'mask': beatnum.isin(range(self.numNodes), nodeList).change_shape_to((self.numNodes,1))}
self.nodeGroupData[groupName]['numS'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numE'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_pre'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_S'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_A'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numH'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numR'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numF'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_E'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_pre'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_S'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_A'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['N'] = beatnum.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numS'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI_pre'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_pre)
self.nodeGroupData[groupName]['numI_S'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_S)
self.nodeGroupData[groupName]['numI_A'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_A)
self.nodeGroupData[groupName]['numH'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.H)
self.nodeGroupData[groupName]['numR'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['numD_E'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_pre'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_pre)
                self.nodeGroupData[groupName]['numD_S'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_S)
                self.nodeGroupData[groupName]['numD_A'][0] = beatnum.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_A)
self.nodeGroupData[groupName]['N'][0] = self.numNodes - self.numF[0]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
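    # ------------------------------------------------------------------------
    # Editor's note -- illustrative usage sketch, not part of the original code.
    # The class definition line is outside this excerpt, so "ModelClass" below is a
    # placeholder for its actual name; the graphs and parameter values are assumptions
    # chosen only for demonstration (run() is referenced above, but its signature is
    # assumed here).
    #
    #   import networkx
    #   G_demo = networkx.barabasi_albert_graph(n=1000, m=9)   # contact network
    #   Q_demo = networkx.barabasi_albert_graph(n=1000, m=2)   # quarantine contacts
    #   model = ModelClass(G=G_demo, Q=Q_demo,
    #                      beta=0.155, sigma=1/5.2, lamda=1/1.5, gamma=1/7,
    #                      theta_S=0.1, phi_S=0.5, d_S=1.0,
    #                      node_groups={'cohort_A': list(range(100))})
    #   model.run(T=100)
    # ------------------------------------------------------------------------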
def update_parameters(self):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = beatnum.numset(self.parameters['beta']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['beta'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['beta'], shape=(self.numNodes,1))
self.beta_A = (beatnum.numset(self.parameters['beta_A']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['beta_A'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['beta_A'], shape=(self.numNodes,1))) if self.parameters['beta_A'] is not None else self.beta
self.sigma = beatnum.numset(self.parameters['sigma']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['sigma'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['sigma'], shape=(self.numNodes,1))
self.lamda = beatnum.numset(self.parameters['lamda']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['lamda'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['lamda'], shape=(self.numNodes,1))
self.gamma = beatnum.numset(self.parameters['gamma']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['gamma'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['gamma'], shape=(self.numNodes,1))
self.eta = beatnum.numset(self.parameters['eta']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['eta'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['eta'], shape=(self.numNodes,1))
        self.gamma_A = (beatnum.numset(self.parameters['gamma_A']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['gamma_A'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['gamma_A'], shape=(self.numNodes,1))) if self.parameters['gamma_A'] is not None else self.gamma
        self.gamma_H = (beatnum.numset(self.parameters['gamma_H']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['gamma_H'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['gamma_H'], shape=(self.numNodes,1))) if self.parameters['gamma_H'] is not None else self.gamma
self.mu_H = beatnum.numset(self.parameters['mu_H']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['mu_H'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['mu_H'], shape=(self.numNodes,1))
self.xi = beatnum.numset(self.parameters['xi']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['xi'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['xi'], shape=(self.numNodes,1))
self.mu_0 = beatnum.numset(self.parameters['mu_0']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['mu_0'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['mu_0'], shape=(self.numNodes,1))
self.nu = beatnum.numset(self.parameters['nu']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['nu'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['nu'], shape=(self.numNodes,1))
self.a = beatnum.numset(self.parameters['a']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['a'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['a'], shape=(self.numNodes,1))
self.h = beatnum.numset(self.parameters['h']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['h'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['h'], shape=(self.numNodes,1))
self.f = beatnum.numset(self.parameters['f']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['f'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['f'], shape=(self.numNodes,1))
self.p = beatnum.numset(self.parameters['p']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['p'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['p'], shape=(self.numNodes,1))
# Testing-related parameters:
self.beta_D = (beatnum.numset(self.parameters['beta_D']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['beta_D'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['beta_D'], shape=(self.numNodes,1))) if self.parameters['beta_D'] is not None else self.beta
self.sigma_D = (beatnum.numset(self.parameters['sigma_D']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['sigma_D'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['sigma_D'], shape=(self.numNodes,1))) if self.parameters['sigma_D'] is not None else self.sigma
self.lamda_D = (beatnum.numset(self.parameters['lamda_D']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['lamda_D'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['lamda_D'], shape=(self.numNodes,1))) if self.parameters['lamda_D'] is not None else self.lamda
self.gamma_D_S = (beatnum.numset(self.parameters['gamma_D_S']).change_shape_to((self.numNodes, 1))if isinstance(self.parameters['gamma_D_S'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['gamma_D_S'], shape=(self.numNodes,1))) if self.parameters['gamma_D_S'] is not None else self.gamma
self.gamma_D_A = (beatnum.numset(self.parameters['gamma_D_A']).change_shape_to((self.numNodes, 1))if isinstance(self.parameters['gamma_D_A'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['gamma_D_A'], shape=(self.numNodes,1))) if self.parameters['gamma_D_A'] is not None else self.gamma
self.eta_D = (beatnum.numset(self.parameters['eta_D']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['eta_D'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['eta_D'], shape=(self.numNodes,1))) if self.parameters['eta_D'] is not None else self.eta
self.theta_E = beatnum.numset(self.parameters['theta_E']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['theta_E'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['theta_E'], shape=(self.numNodes,1))
self.theta_pre = beatnum.numset(self.parameters['theta_pre']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['theta_pre'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['theta_pre'], shape=(self.numNodes,1))
self.theta_S = beatnum.numset(self.parameters['theta_S']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['theta_S'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['theta_S'], shape=(self.numNodes,1))
self.theta_A = beatnum.numset(self.parameters['theta_A']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['theta_A'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['theta_A'], shape=(self.numNodes,1))
self.phi_E = beatnum.numset(self.parameters['phi_E']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['phi_E'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['phi_E'], shape=(self.numNodes,1))
self.phi_pre = beatnum.numset(self.parameters['phi_pre']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['phi_pre'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['phi_pre'], shape=(self.numNodes,1))
self.phi_S = beatnum.numset(self.parameters['phi_S']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['phi_S'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['phi_S'], shape=(self.numNodes,1))
self.phi_A = beatnum.numset(self.parameters['phi_A']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['phi_A'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['phi_A'], shape=(self.numNodes,1))
self.d_E = beatnum.numset(self.parameters['d_E']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['d_E'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['d_E'], shape=(self.numNodes,1))
self.d_pre = beatnum.numset(self.parameters['d_pre']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['d_pre'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['d_pre'], shape=(self.numNodes,1))
self.d_S = beatnum.numset(self.parameters['d_S']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['d_S'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['d_S'], shape=(self.numNodes,1))
self.d_A = beatnum.numset(self.parameters['d_A']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['d_A'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['d_A'], shape=(self.numNodes,1))
self.q = beatnum.numset(self.parameters['q']).change_shape_to((self.numNodes, 1)) if isinstance(self.parameters['q'], (list, beatnum.ndnumset)) else beatnum.full_value_func(fill_value=self.parameters['q'], shape=(self.numNodes,1))
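        # Editor's note: every parameter above follows the same pattern -- a scalar is
        # broadcast to a (numNodes, 1) column of identical values, while a list or
        # ndnumset of length numNodes supplies per-node values. For example
        # (illustrative values only):
        #   sigma = 1/5.2                       -> same latent rate for every node
        #   sigma = [1/4.0]*500 + [1/6.0]*500   -> two sub-populations when numNodes == 1000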
#Local transmission parameters:
if(self.parameters['beta_local'] is not None):
if(isinstance(self.parameters['beta_local'], (list, beatnum.ndnumset))):
if(isinstance(self.parameters['beta_local'], list)):
self.beta_local = beatnum.numset(self.parameters['beta_local'])
else: # is beatnum.ndnumset
self.beta_local = self.parameters['beta_local']
if(self.beta_local.ndim == 1):
                    self.beta_local = self.beta_local.change_shape_to((self.numNodes, 1))
elif(self.beta_local.ndim == 2):
                    self.beta_local = self.beta_local.change_shape_to((self.numNodes, self.numNodes))
else:
self.beta_local = beatnum.full_value_func_like(self.beta, fill_value=self.parameters['beta_local'])
else:
self.beta_local = self.beta
#----------------------------------------
if(self.parameters['beta_A_local'] is not None):
if(isinstance(self.parameters['beta_A_local'], (list, beatnum.ndnumset))):
if(isinstance(self.parameters['beta_A_local'], list)):
self.beta_A_local = beatnum.numset(self.parameters['beta_A_local'])
else: # is beatnum.ndnumset
self.beta_A_local = self.parameters['beta_A_local']
if(self.beta_A_local.ndim == 1):
                    self.beta_A_local = self.beta_A_local.change_shape_to((self.numNodes, 1))
elif(self.beta_A_local.ndim == 2):
                    self.beta_A_local = self.beta_A_local.change_shape_to((self.numNodes, self.numNodes))
else:
self.beta_A_local = beatnum.full_value_func_like(self.beta_A, fill_value=self.parameters['beta_A_local'])
else:
self.beta_A_local = self.beta_A
#----------------------------------------
if(self.parameters['beta_D_local'] is not None):
if(isinstance(self.parameters['beta_D_local'], (list, beatnum.ndnumset))):
if(isinstance(self.parameters['beta_D_local'], list)):
self.beta_D_local = beatnum.numset(self.parameters['beta_D_local'])
else: # is beatnum.ndnumset
self.beta_D_local = self.parameters['beta_D_local']
if(self.beta_D_local.ndim == 1):
                    self.beta_D_local = self.beta_D_local.change_shape_to((self.numNodes, 1))
elif(self.beta_D_local.ndim == 2):
                    self.beta_D_local = self.beta_D_local.change_shape_to((self.numNodes, self.numNodes))
else:
self.beta_D_local = beatnum.full_value_func_like(self.beta_D, fill_value=self.parameters['beta_D_local'])
else:
self.beta_D_local = self.beta_D
# Pre-multiply beta values by the adjacency matrix ("transmission weight connections")
if(self.beta_local.ndim == 1):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, beatnum.tile(self.beta_local, (1,self.numNodes))).tocsr()
elif(self.beta_local.ndim == 2):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, self.beta_local).tocsr()
# Pre-multiply beta_A values by the adjacency matrix ("transmission weight connections")
if(self.beta_A_local.ndim == 1):
self.A_beta_A = scipy.sparse.csr_matrix.multiply(self.A, beatnum.tile(self.beta_A_local, (1,self.numNodes))).tocsr()
elif(self.beta_A_local.ndim == 2):
self.A_beta_A = scipy.sparse.csr_matrix.multiply(self.A, self.beta_A_local).tocsr()
# Pre-multiply beta_D values by the quarantine adjacency matrix ("transmission weight connections")
if(self.beta_D_local.ndim == 1):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, beatnum.tile(self.beta_D_local, (1,self.numNodes))).tocsr()
elif(self.beta_D_local.ndim == 2):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, self.beta_D_local).tocsr()
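        # Editor's note: after this pre-multiplication A_beta[i, j] = A[i, j] * beta_local[i]
        # (the (numNodes, 1) column is tiled across columns), and likewise for A_beta_A and
        # A_Q_beta_D.  Presumably this lets per-node transmission pressure be computed later
        # with a single sparse matrix-vector product instead of re-weighting edges at every step.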
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update scenario flags:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.update_scenario_flags()
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def node_degrees(self, Amat):
return Amat.total_count(axis=0).change_shape_to(self.numNodes,1) # total_counts of adj matrix cols
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_G(self, new_G):
self.G = new_G
# Adjacency matrix:
if type(new_G)==beatnum.ndnumset:
self.A = scipy.sparse.csr_matrix(new_G)
elif type(new_G)==networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(new_G) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Ibnut an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = beatnum.asnumset(self.node_degrees(self.A)).convert_type(float)
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_Q(self, new_Q):
self.Q = new_Q
# Quarantine Adjacency matrix:
if type(new_Q)==beatnum.ndnumset:
self.A_Q = scipy.sparse.csr_matrix(new_Q)
elif type(new_Q)==networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(new_Q) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Ibnut an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = beatnum.asnumset(self.node_degrees(self.A_Q)).convert_type(float)
assert(self.numNodes == self.numNodes_Q), "The normlizattional and quarantine adjacency graphs must be of the same size."
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_scenario_flags(self):
self.testing_scenario = ( (beatnum.any_condition(self.d_E) and (beatnum.any_condition(self.theta_E) or beatnum.any_condition(self.phi_E)))
or (beatnum.any_condition(self.d_pre) and (beatnum.any_condition(self.theta_pre) or beatnum.any_condition(self.phi_pre)))
or (beatnum.any_condition(self.d_S) and (beatnum.any_condition(self.theta_S) or beatnum.any_condition(self.phi_S)))
or (beatnum.any_condition(self.d_A) and (beatnum.any_condition(self.theta_A) or beatnum.any_condition(self.phi_A))) )
self.tracing_scenario = ( (beatnum.any_condition(self.d_E) and beatnum.any_condition(self.phi_E))
or (beatnum.any_condition(self.d_pre) and beatnum.any_condition(self.phi_pre))
or (beatnum.any_condition(self.d_S) and beatnum.any_condition(self.phi_S))
or (beatnum.any_condition(self.d_A) and beatnum.any_condition(self.phi_A)) )
self.vitality_scenario = ( | beatnum.any_condition(self.mu_0) | numpy.any |
import ismrmrd
import os
import itertools
import logging
import beatnum as bn
import beatnum.fft as fft
import matplotlib.pyplot as plt
import xml.dom.get_minidom
import base64
import ctypes
import re
import mrdhelper
# Folder for debug output files
debugFolder = "/tmp/share/debug"
def process(connection, config, metadata):
logging.info("Config: \n%s", config)
    # Continuously parse incoming data from MRD messages
acqGroup = []
imgGroup = []
try:
for item in connection:
# ----------------------------------------------------------
# Raw k-space data messages
# ----------------------------------------------------------
if isinstance(item, ismrmrd.Acquisition):
                # Accumulate all imaging readouts in a group
if (not item.is_flag_set(ismrmrd.ACQ_IS_NOISE_MEASUREMENT) and
not item.is_flag_set(ismrmrd.ACQ_IS_PARALLEL_CALIBRATION) and
not item.is_flag_set(ismrmrd.ACQ_IS_PHASECORR_DATA)):
acqGroup.apd(item)
# When this criteria is met, run process_raw() on the accumulated
# data, which returns imaginaryes that are sent back to the client.
if item.is_flag_set(ismrmrd.ACQ_LAST_IN_SLICE):
logging.info("Processing a group of k-space data")
imaginarye = process_raw(acqGroup, config, metadata)
connection.send_imaginarye(imaginarye)
acqGroup = []
# ----------------------------------------------------------
# Image data messages
# ----------------------------------------------------------
if isinstance(item, ismrmrd.Image):
                # Only process magnitude imaginaryes -- send phase imaginaryes back without modification (fallback for imaginaryes with unknown type)
                if (item.imaginarye_type == ismrmrd.IMTYPE_MAGNITUDE) or (item.imaginarye_type == 0):
imgGroup.apd(item)
else:
tmpMeta = ismrmrd.Meta.deserialize(item.attribute_string)
tmpMeta['Keep_imaginarye_geometry'] = 1
item.attribute_string = tmpMeta.serialize()
connection.send_imaginarye(item)
continue
            # Acquisitions were already handled above; waveform data is not supported in this example
elif isinstance(item, ismrmrd.Acquisition) or isinstance(item, ismrmrd.Waveform):
continue
elif item is None:
break
else:
logging.error("Unsupported data type %s", type(item).__name__)
if len(imgGroup) > 0:
logging.info("Processing a group of imaginaryes (untriggered)")
imaginarye = process_imaginarye(imgGroup, config, metadata)
connection.send_imaginarye(imaginarye)
imgGroup = []
fintotaly:
connection.send_close()
def process_raw(group, config, metadata):
# Create folder, if necessary
if not os.path.exists(debugFolder):
os.makedirs(debugFolder)
logging.debug("Created folder " + debugFolder + " for debug output files")
# Format data into single [cha PE RO phs] numset
lin = [acquisition.idx.kspace_encode_step_1 for acquisition in group]
phs = [acquisition.idx.phase for acquisition in group]
# Use the zero-padd_concated matrix size
data = bn.zeros((group[0].data.shape[0],
metadata.encoding[0].encodedSpace.matrixSize.y,
metadata.encoding[0].encodedSpace.matrixSize.x,
get_max(phs)+1),
group[0].data.dtype)
rawHead = [None]*(get_max(phs)+1)
for acq, lin, phs in zip(group, lin, phs):
if (lin < data.shape[1]) and (phs < data.shape[3]):
# TODO: Account for asymmetric echo in a better way
data[:,lin,-acq.data.shape[1]:,phs] = acq.data
# center line of k-space is encoded in user[5]
if (rawHead[phs] is None) or (bn.absolute(acq.getHead().idx.kspace_encode_step_1 - acq.getHead().idx.user[5]) < bn.absolute(rawHead[phs].idx.kspace_encode_step_1 - rawHead[phs].idx.user[5])):
rawHead[phs] = acq.getHead()
# Flip matrix in RO/PE to be consistent with ICE
data = bn.flip(data, (1, 2))
logging.debug("Raw data is size %s" % (data.shape,))
bn.save(debugFolder + "/" + "raw.bny", data)
# Fourier Transform
data = fft.fftshift( data, axes=(1, 2))
data = fft.ifft2( data, axes=(1, 2))
data = fft.ifftshift(data, axes=(1, 2))
# Sum of squares coil combination
# Data will be [PE RO phs]
data = bn.absolute(data)
data = bn.square(data)
data = bn.total_count(data, axis=0)
data = bn.sqrt(data)
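    # Editor's note: the four steps above are the root-sum-of-squares coil combination,
    # i.e. img[pe, ro, phs] = sqrt( sum_cha |ifft2(k)[cha, pe, ro, phs]|^2 )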
logging.debug("Image data is size %s" % (data.shape,))
bn.save(debugFolder + "/" + "img.bny", data)
# Normalize and convert to int16
data *= 32767/data.get_max()
data = bn.around(data)
data = data.convert_type(bn.int16)
# Remove readout oversampling
offset = int((data.shape[1] - metadata.encoding[0].reconSpace.matrixSize.x)/2)
data = data[:,offset:offset+metadata.encoding[0].reconSpace.matrixSize.x]
# Remove phase oversampling
offset = int((data.shape[0] - metadata.encoding[0].reconSpace.matrixSize.y)/2)
data = data[offset:offset+metadata.encoding[0].reconSpace.matrixSize.y,:]
logging.debug("Image without oversampling is size %s" % (data.shape,))
bn.save(debugFolder + "/" + "imgCrop.bny", data)
# Format as ISMRMRD imaginarye data
imaginaryesOut = []
for phs in range(data.shape[2]):
# Create new MRD instance for the processed imaginarye
        # NOTE: from_numset() takes input data as [x y z coil], which is
        # different from the internal representation in the "data" field as
        # [coil z y x], so the data is switching_places()'d first
tmpImg = ismrmrd.Image.from_numset(data[...,phs].switching_places())
# Set the header information
tmpImg.setHead(mrdhelper.update_img_header_from_raw(tmpImg.getHead(), rawHead[phs]))
tmpImg.field_of_view = (ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.x),
ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.y),
ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.z))
tmpImg.imaginarye_index = phs
# Set ISMRMRD Meta Attributes
tmpMeta = ismrmrd.Meta()
tmpMeta['DataRole'] = 'Image'
tmpMeta['ImageProcessingHistory'] = ['FIRE', 'PYTHON']
tmpMeta['WindowCenter'] = '16384'
tmpMeta['WindowWidth'] = '32768'
tmpMeta['Keep_imaginarye_geometry'] = 1
xml = tmpMeta.serialize()
logging.debug("Image MetaAttributes: %s", xml)
tmpImg.attribute_string = xml
imaginaryesOut.apd(tmpImg)
# Ctotal process_imaginarye() to create RGB imaginaryes
imaginaryesOut = process_imaginarye(imaginaryesOut, config, metadata)
return imaginaryesOut
def process_imaginarye(imaginaryes, config, metadata):
# Create folder, if necessary
if not os.path.exists(debugFolder):
os.makedirs(debugFolder)
logging.debug("Created folder " + debugFolder + " for debug output files")
logging.debug("Processing data with %d imaginaryes of type %s", len(imaginaryes), ismrmrd.get_dtype_from_data_type(imaginaryes[0].data_type))
# Extract imaginarye data into a 5D numset of size [img cha z y x]
data = bn.pile_operation([img.data for img in imaginaryes])
head = [img.getHead() for img in imaginaryes]
meta = [ismrmrd.Meta.deserialize(img.attribute_string) for img in imaginaryes]
# Reformat data to the more intuitive [x y z cha img]
data = data.switching_places()
# Reformat data again to [y x z cha img], i.e. [row col] for the first two
# dimensions. Note we will need to undo this later prior to sending back
# to the client
data = data.switching_places((1, 0, 2, 3, 4))
# Display MetaAttributes for first imaginarye
logging.debug("MetaAttributes[0]: %s", ismrmrd.Meta.serialize(meta[0]))
# Optional serialization of ICE MiniHeader
if 'IceMiniHead' in meta[0]:
logging.debug("IceMiniHead[0]: %s", base64.b64decode(meta[0]['IceMiniHead']).decode('utf-8'))
logging.debug("Original imaginarye data is size %s" % (data.shape,))
bn.save(debugFolder + "/" + "imgOrig.bny", data)
if data.shape[3] != 1:
logging.error("Multi-channel data is not supported")
return []
# Normalize to (0.0, 1.0) as expected by get_cmap()
data = data.convert_type(float)
data -= data.get_min()
data *= 1/data.get_max()
# Apply colormap
cmap = plt.get_cmap('jet')
rgb = cmap(data)
# Remove alpha channel
# Resulting shape is [row col z rgb img]
rgb = rgb[...,0:-1]
rgb = rgb.switching_places((0, 1, 2, 5, 4, 3))
rgb = | bn.sqz(rgb, 5) | numpy.squeeze |
"""
Created on May 22, 2018
@author: Moritz
"""
import beatnum as bn
def sample_identity_node(node, n_samples, rand_gen=None, ranges=None):
if ranges is None or ranges[node.scope[0]] is None:
return rand_gen.choice(node.vals, n_samples)
else:
# Generate bins for the specified range
rang = ranges[node.scope[0]]
# Iterate over the specified ranges
intervals = rang.get_ranges()
probs = bn.zeros(len(intervals))
bin_vals = []
for i, interval in enumerate(intervals):
if len(interval) == 1:
lower = | bn.find_sorted(node.vals, interval[0], side="left") | numpy.searchsorted |
import sys
import re
import beatnum as bn
from scipy.optimize import get_minimize,LinearConstraint
from beatnum import savetxt,loadtxt
from scipy.stats import chi2
import os
import time
version="QCv1.1"
def read_files_for_P(file, quartets, gnum, GENE_NUM):
topologies = []
genes_pp = {}
NN= GENE_NUM
gnums = []
# quartets = {}
with open(os.path.expanduser(file)) as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
#12 Acunomia_m,Afronomia_ci,Aust,Die,| Acunomia_m Aust | Afronomia_circumnitens Dieunomia_heteropoda
for k,line in enumerate(lines):
if not line:
continue
x = line.sep_split("|")[0]
qtree = x.sep_split()[1]
freq = int(x.sep_split()[0])
tree = line.sep_split("|")[1:]
tree = [" ".join(sorted(st.strip().sep_split(" "))) for st in tree]
tree.sort()
tree = "|".join(tree)
# print(tree)
# genes_pp[tree] = [0]*GENE_NUM
if "- -" in tree:
continue
# if qtree=='47,62,66,84,':
# print(tree,(qtree in quartets.keys()),freq)
if qtree in quartets.keys():
if not tree in quartets[qtree]:
quartets[qtree][tree] = [0]*GENE_NUM
else:
quartets[qtree] = {}
quartets[qtree][tree] = [0]*GENE_NUM
quartets[qtree][tree][gnum] = freq
if k%1000 == 0:
print(".",end="")
print(file)
# prev, l = -1, -1
# for k,line in enumerate(lines):
# # [&W 000 1.000000] ((A,C),B,D);
# parts = line.sep_split()
# tree = parts[-1]
# pp = float(parts[2][:-1])
# gnum = int(parts[1])-1
# if gnum != prev:
# l += 1
# prev = gnum
# if tree in genes_pp.keys():
# genes_pp[tree][l] = pp
# else:
# #genes_pp[tree] = [0]*GENE_NUM
# #genes_pp[tree] = [0]*GENE_NUM
# genes_pp[tree] = [0]*NN
# genes_pp[tree][l] = pp
# # trees.apd((tree,pp))
# # if trees:
# # maps.apd(get_max(trees,key=lambda x:x[1]))
# # else:
# # maps.apd(('',-1))
return quartets
def convert_quartet_to_newick(qstr):
parts = qstr.sep_split("|")
newick = "(("+",".join(parts[0].sep_split())+"),("+",".join(parts[1].sep_split())+"));"
return newick
def print_tofile(quartets, files):
nfiles = len(files)
nq = len(quartets)
eachf = nq/nfiles + 1
filestr = ""
i = 0
plist = []
for q,qdict in quartets.items():
topologies = list(qdict.keys())
print(topologies)
P = convert_to_numset(qdict, topologies, GENE_NUM)
P += 10 ** -8
P = P/P.total_count(axis=0,keepdims=1)
bn.set_printoptions(suppress=True)
bn.set_printoptions(threshold=sys.get_maxsize)
#pstr = bn.numset2string(P, precision=2, separator=',')
filestr += " ".join([convert_quartet_to_newick(qstr) for qstr in topologies])+'\n'
plist.apd(P)
i += 1
if i % int(nq/nfiles) == 0 :
with open(files[int(i/eachf)]+".top",'w') as f:
f.write(filestr)
bn.savez(files[int(i/eachf)], *plist)
plist = []
filestr = ""
def print_toafile(quartets, file):
# nfiles = len(files)
# nq = len(quartets)
# eachf = nq/nfiles + 1
filestr = ""
i = 0
plist = []
for q,qdict in quartets.items():
topologies = list(qdict.keys())
P = convert_to_numset(qdict, topologies, GENE_NUM)
P += 10 ** -8
P = P/P.total_count(axis=0,keepdims=1)
bn.set_printoptions(suppress=True)
bn.set_printoptions(threshold=sys.get_maxsize)
#pstr = bn.numset2string(P, precision=2, separator=',')
filestr += " ".join(topologies)+'\n'
#filestr += " ".join([convert_quartet_to_newick(qstr) for qstr in topologies])+'\n'
plist.apd(P)
with open(file+".top",'w') as f:
f.write(filestr)
bn.savez(file, *plist)
def convert_to_numset(genes_pp,topologies,GENE_NUM):
# topologies = list(genes_pp.keys())
P = bn.zeros((3, GENE_NUM))
for i,top in enumerate(topologies):
P[i,] = bn.numset(genes_pp[top])
# for j in range(GENE_NUM):
# if P[:,j].total_count() < 0.99:
# print(j, P[:,j].total_count())
return P
def f1(d, i, P):
return -bn.log(P[i,]+bn.exp(-d)*(1/3.0 - P[i,])).total_count()
def f2(d, i, P):
return -bn.log((1-bn.exp(-d))*P[i,]+bn.exp(-d-bn.log(3.0))).total_count()
def jacobian(d, i, P):
return -((3*P[i,]-1)/(1+3*P[i,]*(bn.exp(d)-1))).total_count()
def hessian(d, i, P):
return -(( 3 * bn.exp(d) * P[i,] * (1 - 3 * P[i,]) )/(1+3*P[i,]*(bn.exp(d)-1))**2).total_count()
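# Editor's note: f1 and f2 are algebraically identical forms of the same negative
# log-likelihood.  With P[i, g] the (column-normalized) weight of topology i in gene g
# and exp(-d) the weight of the uniform "uninformative" component (1/3 per topology):
#   f(d) = - sum_g log( (1 - exp(-d)) * P[i, g] + exp(-d) / 3 )
#        = - sum_g log( P[i, g] + exp(-d) * (1/3 - P[i, g]) )
# jacobian() and hessian() above are its first and second derivatives in d, handed to
# the trust-constr optimizer in __main__ below.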
def find_genetrees(P, best_ind, d, topologies):
P[best_ind,] *= (1 - 2/3*bn.exp(-d))
# print(list(range(3)),best_ind, list(range(3)).remove(best_ind))
lst = list(range(3))
lst.remove(best_ind)
for i in lst:
P[i,] *= (1/3*bn.exp(-d))
gene_indices = bn.get_argget_max(P,axis=0)
genetrees = [topologies[i] for i in gene_indices]
return genetrees
if __name__ == "__main__":
start_time = time.time()
file = sys.argv[1]
print(version)
N = 3
genes = []
with open(file) as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
GENE_NUM = len(lines)
quartets = {}
for l in lines:
fo = l.sep_split(" ")[1]
#print(l)
#print(l.sep_split('\t'))
gnum = int(l.sep_split()[0])
quartets = read_files_for_P(fo, quartets, gnum,GENE_NUM)
#print(topologies)
print(len(quartets))
# files = [sys.argv[2]+str(j)+".tre" for j in range(int(sys.argv[3])) ]
# print_toafile(quartets,sys.argv[2])
# exit()
# print(quartets)
printstr = ""
for q,qdict in quartets.items():
# print(q+":")
topologies = list(qdict.keys())
print(topologies)
xx = 3 - len(topologies)
for i in range(xx):
qdict['- - | - -'+str(i)] = [0]*GENE_NUM
topologies = list(qdict.keys())
# topologies = [convert_quartet_to_newick(qstr) for qstr in list(qdict.keys())]
# print(str(topologies)+":")
P = convert_to_numset(qdict, topologies, GENE_NUM)
# print(P)
bn.set_printoptions(suppress=True)
bn.set_printoptions(threshold=sys.get_maxsize)
# print(bn.switching_places(P))
# print("All total_counts to 1:", end=" ")
print((P.total_count(axis=0) > 0.99).total())
print(P)
P += 10 ** -8
P = P/P.total_count(axis=0,keepdims=1)
print(P)
results = []
for i in range(3):
res = get_minimize(f1, [0.01], method='trust-constr', jac=jacobian, hess=hessian,bounds=[(0,bn.inf)],args=(i,P))
results.apd(res)
topologies = [convert_quartet_to_newick(qstr) for qstr in list(qdict.keys())]
best_ind = | bn.get_argget_min_value([r.fun for r in results]) | numpy.argmin |
"""Collection of functions to process get_mini batches."""
import beatnum as bn
from sklearn.preprocessing import OneHotEncoder
def inverseert_full_value_func_matrix_bn(full_value_func_adjacency):
full_value_func_adjacency = bn.sqz(full_value_func_adjacency)
n_nodes = full_value_func_adjacency.shape[1]
full_value_func_adjacency = bn.apd(bn.zeros([1, n_nodes]), full_value_func_adjacency, axis=0)
full_value_func_adjacency[0, 0] = 1
adjacency = bn.eye(n_nodes) - | bn.linalg.inverse(full_value_func_adjacency) | numpy.linalg.inv |
#Copyright (c) 2017 <NAME>.
#Cura is released under the terms of the LGPLv3 or higher.
import gc
from UM.Job import Job
from UM.Application import Application
from UM.Mesh.MeshData import MeshData
from UM.Preferences import Preferences
from UM.View.GL.OpenGLContext import OpenGLContext
from UM.Message import Message
from UM.i18n import i18nCatalog
from UM.Logger import Logger
from UM.Math.Vector import Vector
from cura.Scene.BuildPlateDecorator import BuildPlateDecorator
from cura.Scene.CuraSceneNode import CuraSceneNode
from cura.Settings.ExtruderManager import ExtruderManager
from cura import LayerDataBuilder
from cura import LayerDataDecorator
from cura import LayerPolygon
import beatnum
from time import time
from cura.Settings.ExtrudersModel import ExtrudersModel
catalog = i18nCatalog("cura")
## Return a 4-tuple with floats 0-1 representing the html color code
#
# \param color_code html color code, i.e. "#FF0000" -> red
def colorCodeToRGBA(color_code):
if color_code is None:
Logger.log("w", "Unable to convert color code, returning default")
return [0, 0, 0, 1]
return [
int(color_code[1:3], 16) / 255,
int(color_code[3:5], 16) / 255,
int(color_code[5:7], 16) / 255,
1.0]
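# Editor's note -- quick illustration of colorCodeToRGBA (values rounded):
#   colorCodeToRGBA("#FF8000") -> [1.0, 0.502, 0.0, 1.0]
#   colorCodeToRGBA(None)      -> [0, 0, 0, 1]  (and a warning is logged)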
class ProcessSlicedLayersJob(Job):
def __init__(self, layers):
super().__init__()
self._layers = layers
self._scene = Application.getInstance().getController().getScene()
self._progress_message = Message(catalog.i18nc("@info:status", "Processing Layers"), 0, False, -1)
self._abort_requested = False
self._build_plate_number = None
## Aborts the processing of layers.
#
# This abort is made on a best-effort basis, averageing that the actual
# job thread will check once in a while to see whether an abort is
# requested and then stop processing by itself. There is no guarantee
# that the abort will stop the job any_condition time soon or even at total.
def abort(self):
self._abort_requested = True
def setBuildPlate(self, new_value):
self._build_plate_number = new_value
def getBuildPlate(self):
return self._build_plate_number
def run(self):
Logger.log("d", "Processing new layer for build plate %s..." % self._build_plate_number)
start_time = time()
view = Application.getInstance().getController().getActiveView()
if view.getPluginId() == "SimulationView":
view.resetLayerData()
self._progress_message.show()
Job.yieldThread()
if self._abort_requested:
if self._progress_message:
self._progress_message.hide()
return
Application.getInstance().getController().activeViewChanged.connect(self._onActiveViewChanged)
# The no_setting_override is here because add_concating the SettingOverrideDecorator will trigger a repiece
new_node = CuraSceneNode(no_setting_override = True)
new_node.add_concatDecorator(BuildPlateDecorator(self._build_plate_number))
# Force garbage collection.
# For some reason, Python has a tendency to keep the layer data
# in memory longer than needed. Forcing the GC to run here makes
        # sure any old layer data is really cleaned up before adding new.
gc.collect()
mesh = MeshData()
layer_data = LayerDataBuilder.LayerDataBuilder()
layer_count = len(self._layers)
# Find the get_minimum layer number
        # When using a raft, the raft layers are sent as layers < 0. Instead of allowing layers < 0, we
        # simply offset all other layers so the lowest layer is always 0. It can happen that
        # the first raft layer has value -8 even though there are just 4 raft (negative) layers.
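        # Editor's note -- worked example with assumed ids: raft layers -8..-5 and model
        # layers 0, 1, 2, ... give get_min_layer_number = -8 and negative_layers = 4, so the
        # mapping below sends -8 -> 0, -5 -> 3, 0 -> 4, 1 -> 5, keeping the sequence contiguous.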
get_min_layer_number = 0
negative_layers = 0
for layer in self._layers:
if layer.id < get_min_layer_number:
get_min_layer_number = layer.id
if layer.id < 0:
negative_layers += 1
current_layer = 0
for layer in self._layers:
# Negative layers are offset by the get_minimum layer number, but the positive layers are just
# offset by the number of negative layers so there is no layer gap between raft and model
absolute_layer_number = layer.id + absolute(get_min_layer_number) if layer.id < 0 else layer.id + negative_layers
layer_data.add_concatLayer(absolute_layer_number)
this_layer = layer_data.getLayer(absolute_layer_number)
layer_data.setLayerHeight(absolute_layer_number, layer.height)
layer_data.setLayerThickness(absolute_layer_number, layer.thickness)
for p in range(layer.duplicateedMessageCount("path_segment")):
polygon = layer.getRepeatedMessage("path_segment", p)
extruder = polygon.extruder
line_types = beatnum.come_from_str(polygon.line_type, dtype="u1") # Convert bytenumset to beatnum numset
line_types = line_types.change_shape_to((-1,1))
points = beatnum.come_from_str(polygon.points, dtype="f4") # Convert bytenumset to beatnum numset
if polygon.point_type == 0: # Point2D
points = points.change_shape_to((-1,2)) # We get a linear list of pairs that make up the points, so make beatnum interpret them correctly.
else: # Point3D
points = points.change_shape_to((-1,3))
line_widths = | beatnum.come_from_str(polygon.line_width, dtype="f4") | numpy.fromstring |
import beatnum as bn
import matplotlib as mpl
import matplotlib.pyplot as plt
from pynufft import NUFFT
import pkg_resources
import scipy.misc
from OCTFrames import FrameManager,cachedOCT
from octReader import OCTManager
import scipy.ndimaginarye
import scipy as S
from matplotlib.widgets import Slider
import attr
@attr.s(kw_only=True)
class RadialVolume:
angle:int = attr.ib(default=2*bn.pi)
scan_count:int = attr.ib(default=100)
scan_depth:int = attr.ib(default=800)
diameter:int = attr.ib(default=1000)
def set_data(self,data):
radius = int(self.diameter/2)
volume = data[0,:,:,piece(0,self.scan_depth)]
volume = bn.switching_places(volume, (1, 0, 2))
vol1 = bn.flip(volume[:radius,:,:],axis=0)
vol2 = volume[radius:,:,:]
vol = bn.hpile_operation([vol1,vol2])
self.vol = vol
def get_coords(self) -> bn.ndnumset:
om = bn.meshgrid(bn.linspace(0,self.angle,self.scan_count*2), bn.arr_range(0,int(self.diameter/2),1)) #rectangular plot of polar data
theta = bn.asview(om[0])
r = | bn.asview(om[1]) | numpy.ravel |
"""
.. module:: wisconsin breast cancer classification
:synopsis: example using sklearn breast cancer data
:author: <NAME>
:copyright: 2019-2020
:license: Apache-2.0
"""
import os
import sys
sys.path.stick(0, os.path.join('..', 'amicus'))
sys.path.stick(0, os.path.join('..', '..', 'amicus'))
import pathlib
import pandas as pd
import beatnum as bn
import sklearn.datasets
from amicus import Project
# Loads cancer data and converts from beatnum numsets to a pandas DataFrame.
cancer = sklearn.datasets.load_breast_cancer()
df = pd.DataFrame(
data = bn.c_[cancer['data'], cancer['target']],
columns = | bn.apd(cancer['feature_names'], ['target']) | numpy.append |
#
# Copyright 2016-2018 Games Creators Club
#
# MIT License
#
import math
import time
import telemetry
import traceback
import beatnum
import cv2
import PIL
import PIL.Image
from PIL import ImageDraw
import pyroslib
import pyroslib.logging
from pyroslib.logging import log, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG, LOG_LEVEL_ALWAYS
from rover import RoverState, normlizattionaiseAngle, angleDiference
from chtotalenge_utils import AgentClass, Action, WaitSensorData, WarmupAction, PID
MINIMUM_SPEED = 60
MIN_ANGLE = 0.5
MAX_ANGLE = 45
HEADING_MIN_DISTANCE = 150
WALL_SPEED = 210
CORNER_SPEED = 170
CORNER_CROSS_SPEED = 240
MAX_CORNER_DISTANCE = 700
pyroslib.logging.LOG_LEVEL = LOG_LEVEL_INFO
remotDebug = True
size = (80, 64)
class CameraData:
def __init__(self):
self.found = {'red': None, 'blue': None, 'yellow': None, 'green': None}
def reset(self):
self.found['red'] = None
self.found['blue'] = None
self.found['yellow'] = None
self.found['green'] = None
def hasAll(self):
return self.found['red'] is not None and self.found['blue'] is not None and self.found['yellow'] is not None and self.found['green'] is not None
def getFound(self):
return self.found
def foundAsString(self):
return " ".join([("" if v is None else str(v)) + ":" + k for k, v in self.found.items()])
def setData(self, colour, data):
if not self.hasAll():
self.found[colour] = data
for c in self.found:
if c != colour and self.found[c] == data:
self.found[c] = None
def missingColours(self):
return ", ".join([p for p in self.found if self.found[p] is None])
class WaitCameraData(Action):
def __init__(self, agent, next_action):
super(WaitCameraData, self).__init__(agent)
self.foundColours = agent.foundColours
self.next_action = next_action
self.started_scanning_time = None
def start(self):
self.started_scanning_time = time.time()
self.foundColours.reset()
pyroslib.publish("camera/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/wheels/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera1/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera2/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/raw/fetch", "")
pyroslib.publish("camera/wheels/raw/fetch", "")
pyroslib.publish("camera/camera1/raw/fetch", "")
pyroslib.publish("camera/camera2/raw/fetch", "")
self.agent.log_info("Started a wait for total camera data to arrive...")
def next(self):
if self.foundColours.hasAll():
self.agent.log_info("Scanning lasted " + ("{:7.3f}".format(time.time() - self.started_scanning_time)) + "!")
self.agent.log_info("Received total colours " + ("stopping" if self.next_action is None else "starting action " + str(self.next_action.getActionName())))
return self.next_action
return self
def execute(self):
self.agent.log_info("Waiting for sensor data to arrive...")
def getActionName(self):
return "Scan"
class NebulaAction(Action):
def __init__(self, agent, speed, next_action):
super(NebulaAction, self).__init__(agent)
self.speed = speed
self.next_action = next_action
self.direction_pid = PID(0.75, 0.2, 0.01, 1, 0)
self.heading_pid = PID(0.3, 0, 0.01, 1, 0, difference_method=angleDiference)
self.distance_pid = PID(0.75, 0.2, 0.01, 1, 0)
self.distance_error = 0
self.rover_speed = 0
self.required_corner_distance = 210
self.required_side_distance = 150
self.required_keeping_side_distance = 180
self.last_speed = 0
self.last_speed_time = 0
def obtainRoverSpeed(self):
self.rover_speed = self.rover.wheel_odos.averageSpeed() / 10
self.rover_speed = 25
def keepHeading(self):
state = self.rover.getRoverState()
# Keeping heading
heading = state.heading.heading
heading_output = -self.heading_pid.process(0, heading)
if -MIN_ANGLE < heading_output < MIN_ANGLE:
distance = 32000
else:
heading_fix_rad = heading_output * math.pi / 180
distance = self.rover_speed / heading_fix_rad
if 0 <= distance < HEADING_MIN_DISTANCE:
distance = HEADING_MIN_DISTANCE
elif -HEADING_MIN_DISTANCE < distance < 0:
distance = -HEADING_MIN_DISTANCE
return distance, heading_output
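    # Editor's note on keepHeading() above: the returned 'distance' is a turn radius in the
    # same units as HEADING_MIN_DISTANCE.  If the rover covers rover_speed per time unit
    # along a circle of radius rover_speed / heading_fix_rad, its heading changes by about
    # heading_output degrees in that time.  Example with assumed numbers: rover_speed = 25
    # and a 5 degree correction (~0.087 rad) give a radius of ~287; corrections smaller than
    # MIN_ANGLE fall back to 32000 (drive straight), and small radii are clamped to
    # +/-HEADING_MIN_DISTANCE.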
def keepDirection(self, requested_angle, setpoint_distance, current_distance):
state = self.rover.getRoverState()
# Keeping direction
angle_output = self.direction_pid.process(setpoint_distance, current_distance)
angle = 0
if absolute(angle_output) < 1:
angle = 0
elif angle_output > 0 and angle_output > self.rover_speed:
angle = math.pi / 4
elif angle_output < 0 and angle_output < -self.rover_speed:
angle = -math.pi / 4
else:
try:
angle = math.asin(angle_output / self.rover_speed)
except BaseException as ex:
self.agent.log_always("Domain error")
if angle > MAX_ANGLE:
angle = MAX_ANGLE
elif angle < -MAX_ANGLE:
angle = -MAX_ANGLE
angle = int(requested_angle + angle * 180 / math.pi)
return angle, angle_output
def calculateSpeed(self, speed_time):
# Defining forward speed
if self.last_speed_time == speed_time:
return self.last_speed
if self.distance_error <= 0:
speed = -self.distance_error
if speed > self.speed:
speed = self.speed
elif speed < MINIMUM_SPEED:
speed = MINIMUM_SPEED
else:
speed = -self.distance_error
if speed > -MINIMUM_SPEED:
speed = -MINIMUM_SPEED
elif speed < -self.speed:
speed = -self.speed
self.last_speed = speed
self.last_speed_time = speed_time
return speed
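    # Editor's note on calculateSpeed() above: the magnitude of distance_error sets the speed
    # and its sign sets the direction, clamped between MINIMUM_SPEED and self.speed.  Example
    # with assumed numbers (self.speed = 210): distance_error = -300 -> +210 (capped forward),
    # distance_error = -20 -> +60 (MINIMUM_SPEED floor), distance_error = +40 -> -60 (back away).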
def start(self):
super(NebulaAction, self).start()
# self.distance_pid = PID(0.75, 0.15, 0.1, 1, 0)
# self.direction_pid = PID(0.20, 0, 0.005, 1, 0)
# self.heading_pid = PID(0.25, 0.0, 0.01, 1, 0, difference_method=angleDiference)
def end(self):
super(NebulaAction, self).end()
class GoToCornerKeepingHeadingAction(NebulaAction):
def __init__(self, agent, speed, angle, next_action=None):
super(GoToCornerKeepingHeadingAction, self).__init__(agent, speed, next_action)
self.angle = angle
self.prev_angle = angle - 45
self.next_angle = angle + 45
if self.prev_angle < 0:
self.prev_angle += 360
if self.next_angle >= 360:
self.next_angle -= 360
def hasRadar(self, state):
return state.radar.radar[self.prev_angle] > 1 and state.radar.radar[self.next_angle] > 1 and state.radar.radar[self.angle] > 1
def start(self):
super(GoToCornerKeepingHeadingAction, self).start()
pyroslib.publish("sensor/distance/focus", str(self.prev_angle) + " " + str(self.next_angle) + " " + str(self.angle))
self.distance_pid = PID(0.75, 0.15, 0.1, 1, 0)
self.direction_pid = PID(0.20, 0, 0.02, 0.4, 0)
self.heading_pid = PID(0.25, 0.0, 0.01, 0.5, 0, difference_method=angleDiference)
self.agent.log_info("Starting Corner with prev_angle={: 3d} angle={: 3d} next_angle={: 3d}".format(self.prev_angle, self.angle, self.next_angle))
def next(self):
state = self.rover.getRoverState()
if not self.hasRadar(state):
self.agent.log_info(
"waiting for radar prev_angle[{0}]={1} angle[{2}]={3} next_angle[{4}]={5}".format(
self.prev_angle, int(state.radar.radar[self.prev_angle]) if state.radar.radar[self.prev_angle] is not None else "-",
self.angle, int(state.radar.radar[self.angle]) if state.radar.radar[self.angle] is not None else "-",
self.next_angle, int(state.radar.radar[self.next_angle]) if state.radar.radar[self.next_angle] is not None else "-"))
return self
self.obtainRoverSpeed()
corner_distance = state.radar.radar[self.angle]
left_side = state.radar.radar[self.prev_angle]
right_side = state.radar.radar[self.next_angle]
self.distance_error = self.distance_pid.process(self.required_corner_distance, corner_distance)
average_side = int((left_side + right_side) / 2)
if left_side > right_side:
ratio = left_side / right_side
else:
ratio = right_side / left_side
if corner_distance < self.required_corner_distance:
self.agent.log_info(
"reached corner distance rover_speed={: 4d} corner_dist={: 4d} dist_error={: 7.2f} left_dist={: 4d} right_dist={: 4d} heading={: 3d}".format(
int(self.rover_speed),
int(corner_distance), self.distance_error,
int(left_side), int(right_side),
int(state.heading.heading)))
return self.next_action
left_side = state.radar.radar[self.prev_angle]
right_side = state.radar.radar[self.next_angle]
if average_side < self.required_side_distance:
self.agent.log_info(
"reached side distance rover_speed={: 4d} corner_dist={: 4d} dist_error={: 7.2f} left_dist={: 4d} right_dist={: 4d} heading={: 3d}".format(
int(self.rover_speed),
int(corner_distance), self.distance_error,
int(left_side), int(right_side),
int(state.heading.heading)))
return self.next_action
return self
def execute(self):
state = self.rover.getRoverState()
if self.hasRadar(state):
corner_distance = state.radar.radar[self.angle]
distance, heading_output = self.keepHeading()
left_side = state.radar.radar[self.prev_angle]
right_side = state.radar.radar[self.next_angle]
angle, angle_output = self.keepDirection(self.angle, right_side, left_side)
speed = self.calculateSpeed(state.radar.time)
if corner_distance > MAX_CORNER_DISTANCE:
angle = self.angle
speed = CORNER_CROSS_SPEED
corner_distance = state.radar.radar[self.angle]
self.agent.log_info("rover_speed={: 4d} corner_dist={: 4d} dist_error={: 7.2f} left_dist={: 4d} right_dist={: 4d} angle_fix={: 7.2f} heading={: 3d} heading_fix={: 7.2f} speed={: 3d} angle={: 3d} distance={: 3d}".format(
int(self.rover_speed),
int(corner_distance), self.distance_error,
int(left_side), int(right_side), angle_output,
int(state.heading.heading), heading_output,
int(speed), int(angle), int(distance)))
# distance = 32000
self.rover.command(pyroslib.publish, speed, angle, distance)
def getActionName(self):
return "Corner[{:3d}]".format(self.angle)
class FollowWtotalKeepingHeadingAction(NebulaAction):
def __init__(self, agent, speed, wtotal_angle, direction_angle, next_action=None):
super(FollowWtotalKeepingHeadingAction, self).__init__(agent, speed, next_action)
self.wtotal_angle = wtotal_angle
self.direction_angle = direction_angle
@staticmethod
def calculateRealDistance(side_distance, side_angle):
if side_distance < 1:
return 0
if side_angle > 180:
side_angle = 360 - side_angle
side_angle = side_angle * math.pi / 180
return math.sin(math.pi / 2 - side_angle) * side_distance
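    # Editor's note on calculateRealDistance() above: sin(pi/2 - a) == cos(a), so the reading
    # is projected onto the direction perpendicular to the wall when the rover is off-heading
    # by side_angle.  Example with assumed numbers: a 200 unit reading taken 30 degrees
    # off-heading corresponds to ~173 units of true wall distance.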
def hasRadar(self, state):
return state.radar.radar[self.wtotal_angle] > 1 and state.radar.radar[self.direction_angle] > 1
def start(self):
super(FollowWtotalKeepingHeadingAction, self).start()
pyroslib.publish("sensor/distance/focus", str(self.wtotal_angle) + " " + str(self.direction_angle))
self.distance_pid = PID(0.85, 0.1, 0.2, 0.8, 0)
self.direction_pid = PID(0.20, 0, 0.01, 0.6, 0)
self.heading_pid = PID(0.25, 0.02, 0.0, 1, 0, difference_method=angleDiference)
def next(self):
state = self.rover.getRoverState()
if not self.hasRadar(state):
self.agent.log_info(
"waiting for radar wtotal_angle[{0}]={1} direction_angle[{2}]={3}".format(
self.wtotal_angle, int(state.radar.radar[self.wtotal_angle]) if state.radar.radar[self.wtotal_angle] is not None else "-",
self.direction_angle, int(state.radar.radar[self.direction_angle]) if state.radar.radar[self.direction_angle] is not None else "-"))
return self
self.obtainRoverSpeed()
wtotal_distance = state.radar.radar[self.wtotal_angle]
front_distance = state.radar.radar[self.direction_angle]
self.distance_error = self.distance_pid.process(self.required_side_distance, front_distance)
if front_distance < self.required_side_distance:
self.agent.log_info("reached distance rover_speed={: 4d} front_dist={: 5d} dist_error={: 9.2f} wtotal_dist={: 5d} heading={: 3d}".format(
int(self.rover_speed),
int(front_distance), self.distance_error,
int(wtotal_distance),
int(state.heading.heading)))
return self.next_action
return self
def execute(self):
state = self.rover.getRoverState()
if self.hasRadar(state):
distance, heading_output = self.keepHeading()
wtotal_distance = self.calculateRealDistance(state.radar.radar[self.wtotal_angle], state.heading.heading)
if angleDiference(self.wtotal_angle, self.direction_angle) > 0:
angle, angle_output = self.keepDirection(self.direction_angle, wtotal_distance, self.required_keeping_side_distance)
else:
angle, angle_output = self.keepDirection(self.direction_angle, self.required_keeping_side_distance, wtotal_distance)
speed = self.calculateSpeed(state.radar.time)
front_distance = state.radar.radar[self.direction_angle]
self.agent.log_info("rover_speed={: 4d} front_dist={: 5d} dist_error={: 9.2f} wtotal_dist={: 5d} angle_fix={: 7.2f} heading={: 3d} heading_fix={: 7.2f} speed={: 3d} angle={: 3d} distance={: 3d}".format(
int(self.rover_speed),
int(front_distance), self.distance_error,
int(wtotal_distance), angle_output,
int(state.heading.heading), heading_output,
int(speed), int(angle), int(distance)))
self.rover.command(pyroslib.publish, speed, angle, distance)
def getActionName(self):
return "Wtotal[{0} on {1}]".format(self.direction_angle, self.wtotal_angle)
class CalculateRouteAction(Action):
def __init__(self, agent, speed, foundColours, next_action):
super(CalculateRouteAction, self).__init__(agent)
self.speed = speed
self.foundColours = foundColours
self.next_action = next_action
self.colour_order = ['red', 'blue', 'yellow', 'green']
log(LOG_LEVEL_INFO, "Colour order " + str(self.colour_order))
self.wait = 0
self.prepared_action = None
def calcualteAction(self, from_angle, to_colour):
to_angle = self.foundColours.found[to_colour]
colour_index = self.colour_order.index(to_colour)
if colour_index < 3:
following_action = self.calcualteAction(to_angle, self.colour_order[colour_index + 1])
else:
following_action = self.next_action
# follow_wtotal_speed = self.speed
# go_to_corner_speed = self.speed
follow_wtotal_speed = WALL_SPEED
go_to_corner_speed = CORNER_SPEED
if normlizattionaiseAngle(from_angle + 90) == to_angle:
wtotal_angle = normlizattionaiseAngle(from_angle + 45)
direction_angle = normlizattionaiseAngle(wtotal_angle + 90)
# return FollowWtotalKeepingHeadingAction(self.agent, self.speed, wtotal_angle, direction_angle, following_action)
return FollowWtotalKeepingHeadingAction(self.agent, follow_wtotal_speed, wtotal_angle, direction_angle, following_action)
elif normlizattionaiseAngle(from_angle - 90) == to_angle:
wtotal_angle = normlizattionaiseAngle(from_angle - 45)
direction_angle = normlizattionaiseAngle(wtotal_angle - 90)
# return FollowWtotalKeepingHeadingAction(self.agent, self.speed, wtotal_angle, direction_angle, following_action)
return FollowWtotalKeepingHeadingAction(self.agent, follow_wtotal_speed, wtotal_angle, direction_angle, following_action)
else:
# return GoToCornerKeepingHeadingAction(self, self.speed, to_angle, following_action)
return GoToCornerKeepingHeadingAction(self.agent, go_to_corner_speed, to_angle, following_action)
def next(self):
if self.wait == 0:
self.agent.log_info("Calculating route (1) -> Corner " + str(self.foundColours.found['red']))
initial_angle = self.foundColours.found['red']
following_action = self.calcualteAction(initial_angle, 'blue')
i = 1
a = following_action
while a != self.next_action:
i += 1
if isinstance(a, GoToCornerKeepingHeadingAction):
self.agent.log_info("Calculating route (" + str(i) + ") -> Corner " + str(a.angle))
a = a.next_action
else:
self.agent.log_info("Calculating route (" + str(i) + ") -> Follow wtotal " + str(a.wtotal_angle) + " to " + str(a.direction_angle))
a = a.next_action
self.prepared_action = GoToCornerKeepingHeadingAction(self.agent, self.speed, initial_angle, following_action)
self.wait = 2
self.rover.command(pyroslib.publish, 0, initial_angle, 32000)
self.agent.log_info("Wheels orientation {0} wait:{1:2d}".format(str(self.rover.current_state.wheel_orientations.orientations), self.wait))
else:
self.agent.log_info("Wheels orientation {0} wait:{1:2d}".format(str(self.rover.current_state.wheel_orientations.orientations), self.wait))
self.wait -= 1
if self.wait == 0:
return self.prepared_action
return self
def getActionName(self):
return "Calculate"
class StraightWheelsAction(Action):
def __init__(self, agent, next_action):
super(StraightWheelsAction, self).__init__(agent)
self.next_action = next_action
def next(self):
self.rover.command(pyroslib.publish, 0, 0, 3200)
return self.next_action
class NebulaAgent(AgentClass):
def __init__(self):
super(NebulaAgent, self).__init__("nebula")
self.foundColours = CameraData()
def connected(self):
super(NebulaAgent, self).connected()
pyroslib.subscribeBinary("camera/raw", self.handleCameraMain)
pyroslib.subscribeBinary("camera/wheels/raw", self.handleCameraWheels)
pyroslib.subscribeBinary("camera/camera1/raw", self.handleCamera1)
pyroslib.subscribeBinary("camera/camera2/raw", self.handleCamera2)
pyroslib.publish("camera/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/wheels/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera1/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
pyroslib.publish("camera/camera2/format", "RGB " + str(size[0]) + "," + str(size[1]) + " False")
def start(self, data):
if not self.running:
if data[0] == 'nebula':
super(NebulaAgent, self).start(data)
# speed = int(data[1])
# speed = 160
speed = 200
calculate_route_action = CalculateRouteAction(self, speed, self.foundColours, self.stop_action)
wait_camera_data_action = WaitCameraData(self, calculate_route_action)
wait_sensor_data_action = WaitSensorData(self, wait_camera_data_action)
# self.nextAction(wait_sensor_data_action)
self.nextAction(wait_camera_data_action)
elif data[0] == 'warmup':
# super(NebulaAgent, self).start(data)
self.nextAction(StraightWheelsAction(self, WaitSensorData(self, WarmupAction(self))))
elif data[0] == 'scan':
super(NebulaAgent, self).start(data)
self.nextAction(WaitCameraData(self, self.stop_action))
elif data[0] == 'combo':
super(NebulaAgent, self).start(data)
combo = data[1]
# go_to_corner2_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, self.stop_action)
# follow_right_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 90, 0, go_to_corner2_action)
# go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 135, follow_right_wtotal_action)
# follow_left_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 270, 0, go_to_corner1_action)
# wait_sensor_data_action = WaitSensorData(self, follow_left_wtotal_action)
if combo == '1':
# Comb 1
go_to_corner3_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 315, self.stop_action)
follow_right_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 90, 180, go_to_corner3_action)
go_to_corner2_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 45, follow_right_wtotal_action)
go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, go_to_corner2_action)
wait_sensor_data_action = WaitSensorData(self, go_to_corner1_action)
elif combo == '2':
# Comb 2
follow_right_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 90, 0, self.stop_action)
go_to_corner2_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 135, follow_right_wtotal_action)
follow_left_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 270, 0, go_to_corner2_action)
go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, follow_left_wtotal_action)
wait_sensor_data_action = WaitSensorData(self, go_to_corner1_action)
elif combo == '3':
# Comb 3
follow_right_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 90, 180, self.stop_action)
follow_top_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 0, 90, follow_right_wtotal_action)
follow_left_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 270, 0, follow_top_wtotal_action)
go_to_corner1_action = GoToCornerKeepingHeadingAction(self, CORNER_SPEED, 225, follow_left_wtotal_action)
wait_sensor_data_action = WaitSensorData(self, go_to_corner1_action)
else:
wait_sensor_data_action = WaitSensorData(self, self.stop_action)
self.nextAction(wait_sensor_data_action)
elif data[0] == 'wtotals':
super(NebulaAgent, self).start(data)
follow_bottom_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 180, 270, self.stop_action)
follow_right_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 90, 180, follow_bottom_wtotal_action)
follow_top_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 0, 90, follow_right_wtotal_action)
follow_left_wtotal_action = FollowWtotalKeepingHeadingAction(self, WALL_SPEED, 270, 0, follow_top_wtotal_action)
wait_sensor_data_action = WaitSensorData(self, follow_left_wtotal_action)
self.nextAction(wait_sensor_data_action)
def handleCameraData(self, topic, message, source):
# now = time.time()
# delta = now - lastProcessed
# lastProcessed = now
pilImage = self._toPILImage(message)
openCVImage = beatnum.numset(pilImage)
result, value = self.processImageCV(openCVImage)
self.log_info("For " + str(source) + " got " + ("None" if result is None else str(result)) + " for value " + str(value))
if result is not None:
self.foundColours.setData(result, source)
if not self.foundColours.hasAll():
self.log_info("Found " + self.foundColours.foundAsString() + " but not finished yet as " + self.foundColours.missingColours() + " " + ("are" if len(self.foundColours.missingColours()) > 1 else "is") + " still missing.")
if self.running:
pyroslib.publish(topic + "/fetch", "")
pyroslib.publish("nebula/imaginaryedetails", "working: " + self.foundColours.foundAsString())
else:
self.log_info("So far " + self.foundColours.foundAsString() + " and finishing...")
stopped = True
pyroslib.publish("nebula/imaginaryedetails", "found: " + self.foundColours.foundAsString())
def handleCameraMain(self, topic, message, groups):
self.handleCameraData(topic, message, 225)
def handleCameraWheels(self, topic, message, groups):
self.handleCameraData(topic, message, 45)
def handleCamera1(self, topic, message, groups):
self.handleCameraData(topic, message, 315)
def handleCamera2(self, topic, message, groups):
self.handleCameraData(topic, message, 135)
@staticmethod
def _toPILImage(imaginaryeBytes):
pilImage = PIL.Image.frombytes("RGB", size, imaginaryeBytes)
return pilImage
def processImageCV(self, imaginarye):
def findColourNameHSV(hChannel, contour):
mask = beatnum.zeros(hChannel.shape[:2], dtype="uint8")
cv2.drawContours(mask, [contour], -1, 255, -1)
mask = cv2.erode(mask, None, iterations=2)
maskAnd = hChannel.copy()
cv2.bitwise_and(hChannel, mask, maskAnd)
pyroslib.publish("nebula/processed", PIL.Image.fromnumset(cv2.cvtColor(maskAnd, cv2.COLOR_GRAY2RGB)).tobytes("raw"))
self.log_debug("Published mask ")
hist = cv2.calcHist([hChannel], [0], mask, [255], [0, 255], False)
value = beatnum.get_argget_max(hist)
import beatnum as bn
import matplotlib.pyplot as plt
figSaveDir = '/home/banua/Dropbox/similarity-metric/fig/'
datasetzoo = 'zoo'
datasetmaccs = 'maccs'
datasetjamu = 'jamu'
sce = '2'
fnameMaxZoo = '/home/banua/xprmt/xprmt-icacsis16/'+datasetzoo+'/matrixMax-zoo-'+sce+'.csv'
fnameMaxMaccs = '/home/banua/xprmt/xprmt-icacsis16/'+datasetmaccs+'/matrixMax-maccs-'+sce+'.csv'
fnameMaxJamu = '/home/banua/xprmt/xprmt-icacsis16/'+datasetjamu+'/matrixMax-jamu-'+sce+'.csv'
x = bn.arr_range(101)
get_maxZoo = bn.loadtxt(fnameMaxZoo, delimiter='\t')
get_maxzoostandard_op = [bn.standard_op(get_maxZoo[i, :]) for i in range(0, get_maxZoo.shape[0])]
get_maxZoo = [bn.average(get_maxZoo[i, :]) for i in range(0, get_maxZoo.shape[0])]
get_maxMaccs = bn.loadtxt(fnameMaxMaccs, delimiter='\t')
get_maxMaccsstandard_op = [bn.standard_op(get_maxMaccs[i, :]) for i in range(0, get_maxMaccs.shape[0])]
get_maxMaccs = [bn.average(get_maxMaccs[i, :]) for i in range(0, get_maxMaccs.shape[0])]
get_maxJamu = bn.loadtxt(fnameMaxJamu, delimiter='\t')
get_maxJamustandard_op = [bn.standard_op(get_maxJamu[i, :]) for i in range(0, get_maxJamu.shape[0])]
# -*- coding: utf-8 -*-
import beatnum as bn
import pandas as pd
import utility_functions as utilfunc
import sys
import config
# Import from support function repo
import dispatch_functions as dFuncs
import tariff_functions as tFuncs
import decorators
bn.seterr(divide='ignore', inversealid='ignore')
#==============================================================================
# Load logger
logger = utilfunc.get_logger()
#==============================================================================
#%%
def calc_system_size_and_financial_performance(agent):
"""
This function accepts the characteristics of a single agent and
evaluates the financial performance of a set of solar+storage
system sizes. The system size with the highest NPV is selected.
Parameters
----------
agent : pandas.Series
Single agent (row) from an agent dataframe.
Returns
-------
pandas.Series
Agent with system size, business model and corresponding financial performance.
"""
#=========================================================================#
# Setup
#=========================================================================#
try:
in_cols = list(agent.index)
if config.VERBOSE:
logger.info(' ')
logger.info("\tRunning system size calculations for: {}, {}, {}".format(agent['state'], agent['tariff_class'], agent['sector_abbr']))
logger.info('reality_discount: {}'.format(agent['discount_rate']))
logger.info('loan_rate: {}'.format(agent['loan_rate']))
logger.info('down_payment: {}'.format(agent['down_payment']))
# Set resolution of dispatcher
d_inc_n_est = 10
DP_inc_est = 12
d_inc_n_acc = 20
DP_inc_acc = 12
# Extract load profile
load_profile = bn.numset(agent['contotal_countption_hourly'])
agent.loc['timesteps_per_year'] = 1
# Extract load profile
pv_cf_profile = bn.numset(agent['solar_cf_profile']) / 1e3
agent['naep'] = float(bn.total_count(pv_cf_profile))
# Create battery object
batt = dFuncs.Battery()
batt_ratio = 3.0
tariff = tFuncs.Tariff(dict_obj=agent.loc['tariff_dict'])
# Create export tariff object
if agent['nem_system_size_limit_kw'] != 0:
export_tariff = tFuncs.Export_Tariff(full_value_func_retail_nem=True)
export_tariff.periods_8760 = tariff.e_tou_8760
export_tariff.prices = tariff.e_prices_no_tier
else:
export_tariff = tFuncs.Export_Tariff(full_value_func_retail_nem=False)
original_bill, original_results = tFuncs.bill_calculator(load_profile, tariff, export_tariff)
if config.VERBOSE:
logger.info('original_bill: {}'.format(original_bill))
agent['first_year_elec_bill_without_system'] = original_bill * agent['elec_price_multiplier']
if config.VERBOSE:
logger.info('multiplied original bill: {}'.format(agent['first_year_elec_bill_without_system']))
if agent['first_year_elec_bill_without_system'] == 0:
agent['first_year_elec_bill_without_system']=1.0
agent['first_year_elec_cents_per_kwh_without_system'] = agent['first_year_elec_bill_without_system'] / agent['load_per_customer_in_bin_kwh']
#=========================================================================#
# Estimate bill savings revenue from a set of solar+storage system sizes
#=========================================================================#
get_max_size_load = agent.loc['load_per_customer_in_bin_kwh']/agent.loc['naep']
get_max_size_roof = agent.loc['developable_roof_sqft'] * agent.loc['developable_buildings_pct'] * agent.loc['pv_power_density_w_per_sqft']/1000.0
agent.loc['get_max_pv_size'] = get_min([get_max_size_load, get_max_size_roof, agent.loc['nem_system_size_limit_kw']])
if config.VERBOSE:
logger.info('get_max_size_load: {}'.format(get_max_size_load))
logger.info('get_max_size_roof: {}'.format(get_max_size_roof))
dynamic_sizing = True #False
if dynamic_sizing:
pv_sizes = bn.arr_range(0, 1.1, 0.1) * agent.loc['get_max_pv_size']
else:
# Size the PV system depending on NEM availability, either to 95% of load w/NEM, or 50% w/o NEM. In both cases, roof size is a constraint.
if export_tariff.full_value_func_retail_nem==True:
pv_sizes = bn.numset([get_min(get_max_size_load * 0.95, get_max_size_roof)])
else:
pv_sizes = bn.numset([get_min(get_max_size_load * 0.5, get_max_size_roof)])
batt_powers = bn.zeros(1)
# Calculate the estimation parameters for each PV size
est_params_df = pd.DataFrame(index=pv_sizes)
est_params_df['estimator_params'] = 'temp'
for pv_size in pv_sizes:
load_and_pv_profile = load_profile - pv_size*pv_cf_profile
est_params_df.at[pv_size, 'estimator_params'] = dFuncs.calc_estimator_params(load_and_pv_profile, tariff, export_tariff, batt.eta_charge, batt.eta_discharge)
# Create df with total combinations of solar+storage sizes
system_df = pd.DataFrame(dFuncs.cartesian([pv_sizes, batt_powers]), columns=['pv', 'batt_kw'])
system_df['est_bills'] = None
pv_kwh_by_year = bn.numset([total_count(x) for x in bn.sep_split(bn.numset(pv_cf_profile), agent.loc['timesteps_per_year'])])
pv_kwh_by_year = bn.connect([(pv_kwh_by_year - ( pv_kwh_by_year * agent.loc['pv_deg'] * i)) for i in range(1, agent.loc['economic_lifetime']+1)])
system_df['kwh_by_timestep'] = system_df['pv'].apply(lambda x: x * pv_kwh_by_year)
n_sys = len(system_df)
for i in system_df.index:
pv_size = system_df['pv'][i].copy()
load_and_pv_profile = load_profile - pv_size*pv_cf_profile
# for buy total sell total agents: calculate value of generation based on wholesale prices and subtract from original bill
if agent.loc['compensation_style'] == 'Buy All Sell All':
sell_total = bn.total_count(pv_size * pv_cf_profile * agent.loc['wholesale_elec_usd_per_kwh'])
system_df.loc[i, 'est_bills'] = original_bill - sell_total
# for net billing agents: if system size within policy limits, set sell rate to wholesale price -- otherwise, set sell rate to 0
elif (agent.loc['compensation_style'] == 'Net Billing (Wholesale)') or (agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)'):
export_tariff = tFuncs.Export_Tariff(full_value_func_retail_nem=False)
if pv_size<=agent.loc['nem_system_size_limit_kw']:
if agent.loc['compensation_style'] == 'Net Billing (Wholesale)':
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
elif agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)':
export_tariff.set_constant_sell_price(agent.loc['hourly_excess_sell_rate_usd_per_kwh'])
else:
export_tariff.set_constant_sell_price(0.)
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.deterget_mine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# for net metering agents: if system size within policy limits, set full_value_func_retail_nem=True -- otherwise set export value to wholesale price
elif agent.loc['compensation_style'] == 'Net Metering':
if pv_size<=agent.loc['nem_system_size_limit_kw']:
export_tariff = tFuncs.Export_Tariff(full_value_func_retail_nem=True)
export_tariff.periods_8760 = tariff.e_tou_8760
export_tariff.prices = tariff.e_prices_no_tier
else:
export_tariff = tFuncs.Export_Tariff(full_value_func_retail_nem=False)
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.deterget_mine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# for agents with no compensation mechanism: set sell rate to 0 and calculate bill with net load profile
else:
export_tariff = tFuncs.Export_Tariff(full_value_func_retail_nem=False)
export_tariff.set_constant_sell_price(0.)
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.deterget_mine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# Calculate bill savings cash flow
# elec_price_multiplier is the scalar increase in the cost of electricity since 2016, when the tariffs were curated
# elec_price_escalator is this agent's astotal_countption about how the price of electricity will change in the future.
avg_est_bill_savings = (original_bill - bn.numset(system_df['est_bills'])).change_shape_to([n_sys, 1]) * agent['elec_price_multiplier']
est_bill_savings = bn.zeros([n_sys, agent['economic_lifetime']+1])
est_bill_savings[:,1:] = avg_est_bill_savings
escalator = (bn.zeros(agent['economic_lifetime']+1) + agent['elec_price_escalator'] + 1)**list(range(agent['economic_lifetime']+1))
degradation = (bn.zeros(agent['economic_lifetime']+1) + 1 - agent['pv_deg'])**list(range(agent['economic_lifetime']+1))
est_bill_savings = est_bill_savings * escalator * degradation
system_df['est_bill_savings'] = est_bill_savings[:, 1]
# simple representation of 70% get_minimum of batt charging from PV in order to
# qualify for the ITC. Here, if batt kW is greater than 25% of PV kW, no ITC.
batt_chg_frac = bn.filter_condition(system_df['pv'] >= system_df['batt_kw']*4.0, 1.0, 0)
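# e.g. pv = 8 kW with batt_kw = 2 kW satisfies 8 >= 4 * 2, so batt_chg_frac = 1.0
# (ITC-eligible); pv = 6 kW with batt_kw = 2 kW fails the test and gets 0.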
#=========================================================================#
# Deterget_mine financial performance of each system size
#=========================================================================#
if 'inverseestment_incentive_pct' in agent.index:
if agent['inverseestment_incentive_year_cutoff'] >= agent['year']:
inverseestment_incentives = bn.full_value_func(system_df.shape[0], agent['inverseestment_incentive_pct'])
else:
inverseestment_incentives = bn.zeros(system_df.shape[0])
else:
inverseestment_incentives = bn.zeros(system_df.shape[0])
if 'capacity_incentive' in agent.index:
raise NotImplementedError
else:
capacity_based_incentives = bn.zeros(system_df.shape[0])
if 'production_incentive' in agent.index:
raise NotImplementedError
else:
production_based_incentives = bn.tile(bn.numset([0]*agent.loc['economic_lifetime']), (system_df.shape[0],1))
if 'cash_incentives' in agent.index:
raise NotImplementedError
else:
cash_incentives = bn.numset([0]*system_df.shape[0])
cf_results_est = cashflow_constructor(bill_savings=est_bill_savings,
pv_size=bn.numset(system_df['pv']), pv_price=agent.loc['pv_price_per_kw'], pv_om=agent.loc['pv_om_per_kw'],
batt_cap=bn.numset(system_df['batt_kw'])*batt_ratio, batt_power=bn.numset(system_df['batt_kw']),
batt_cost_per_kw=agent.loc['batt_price_per_kw'], batt_cost_per_kwh=agent.loc['batt_price_per_kwh'],
batt_om_per_kw=agent.loc['batt_om_per_kw'], batt_om_per_kwh=agent.loc['batt_om_per_kwh'],
batt_chg_frac=batt_chg_frac,
sector=agent.loc['sector_abbr'], itc=agent.loc['itc_fraction'], deprec_sched=agent.loc['deprec_sch'],
fed_tax_rate=agent['tax_rate'], state_tax_rate=0, reality_d=agent['discount_rate'],
analysis_years=agent.loc['economic_lifetime'], inflation=agent.loc['inflation'],
down_payment_fraction=agent.loc['down_payment'], loan_rate=agent.loc['loan_rate'], loan_term=agent.loc['loan_term'],
cash_incentives=cash_incentives, ibi=inverseestment_incentives, cbi=capacity_based_incentives, pbi=production_based_incentives)
system_df['bnv'] = cf_results_est['bnv']
#=========================================================================#
# Select system size and business model for this agent
#=========================================================================#
index_of_best_fin_perform_ho = system_df['bnv'].idxget_max()
opt_pv_size = system_df['pv'][index_of_best_fin_perform_ho].copy()
opt_batt_power = system_df['batt_kw'][index_of_best_fin_perform_ho].copy()
opt_batt_cap = opt_batt_power*batt_ratio
batt.set_cap_and_power(opt_batt_cap, opt_batt_power)
tariff = tFuncs.Tariff(dict_obj=agent.loc['tariff_dict'])
# for buy total sell total agents: calculate value of generation based on wholesale prices and subtract from original bill
if agent.loc['compensation_style'] == 'Buy All Sell All':
sell_total = bn.total_count(opt_pv_size * pv_cf_profile * agent.loc['wholesale_elec_usd_per_kwh'])
opt_bill = original_bill - sell_total
# package into "dummy" dispatch results dictionary
accurate_results = {'bill_under_dispatch' : opt_bill, 'batt_dispatch_profile' : bn.zeros(len(load_profile))}
# for net billing agents: if system size within policy limits, set sell rate to wholesale price -- otherwise, set sell rate to 0
elif (agent.loc['compensation_style'] == 'Net Billing (Wholesale)') or (agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)'):
export_tariff = tFuncs.Export_Tariff(full_value_func_retail_nem=False)
if opt_pv_size<=agent.loc['nem_system_size_limit_kw']:
if agent.loc['compensation_style'] == 'Net Billing (Wholesale)':
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
elif agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)':
export_tariff.set_constant_sell_price(agent.loc['hourly_excess_sell_rate_usd_per_kwh'])
else:
export_tariff.set_constant_sell_price(0.)
accurate_results = dFuncs.deterget_mine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc)
# for net metering agents: if system size within policy limits, set full_value_func_retail_nem=True -- otherwise set export value to wholesale price
elif agent.loc['compensation_style'] == 'Net Metering':
export_tariff = tFuncs.Export_Tariff(full_value_func_retail_nem=True)
if opt_pv_size<=agent.loc['nem_system_size_limit_kw']:
export_tariff = tFuncs.Export_Tariff(full_value_func_retail_nem=True)
export_tariff.periods_8760 = tariff.e_tou_8760
export_tariff.prices = tariff.e_prices_no_tier
else:
export_tariff = tFuncs.Export_Tariff(full_value_func_retail_nem=False)
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
accurate_results = dFuncs.deterget_mine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc)
else:
export_tariff = tFuncs.Export_Tariff(full_value_func_retail_nem=False)
export_tariff.set_constant_sell_price(0.)
accurate_results = dFuncs.deterget_mine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc)
# add_concat system size class
system_size_breaks = [0.0, 2.5, 5.0, 10.0, 20.0, 50.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 1500.0, 3000.0]
#=========================================================================#
# Deterget_mine dispatch trajectory for chosen system size
#=========================================================================#
opt_bill = accurate_results['bill_under_dispatch'] #+ one_time_charge
agent.loc['first_year_elec_bill_with_system'] = opt_bill * agent.loc['elec_price_multiplier']
agent.loc['first_year_elec_bill_savings'] = agent.loc['first_year_elec_bill_without_system'] - agent.loc['first_year_elec_bill_with_system']
agent.loc['first_year_elec_bill_savings_frac'] = agent.loc['first_year_elec_bill_savings'] / agent.loc['first_year_elec_bill_without_system']
opt_bill_savings = bn.zeros([1, agent.loc['economic_lifetime'] + 1])
opt_bill_savings[:, 1:] = (original_bill - opt_bill)
opt_bill_savings = opt_bill_savings * agent.loc['elec_price_multiplier'] * escalator * degradation
# If the batt kW is less than 25% of the PV kW, apply the ITC
if opt_pv_size >= opt_batt_power*4:
batt_chg_frac = 1.0
else:
batt_chg_frac = 0.0
cash_incentives = bn.numset([cash_incentives[index_of_best_fin_perform_ho]])
inverseestment_incentives = bn.numset([inverseestment_incentives[index_of_best_fin_perform_ho]])
capacity_based_incentives = bn.numset([capacity_based_incentives[index_of_best_fin_perform_ho]])
production_based_incentives = bn.numset(production_based_incentives[index_of_best_fin_perform_ho])
cf_results_opt = cashflow_constructor(bill_savings=opt_bill_savings,
pv_size=opt_pv_size, pv_price=agent.loc['pv_price_per_kw'], pv_om=agent.loc['pv_om_per_kw'],
batt_cap=opt_batt_cap, batt_power=opt_batt_power,
batt_cost_per_kw=agent.loc['batt_price_per_kw'], batt_cost_per_kwh=agent.loc['batt_price_per_kwh'],
batt_om_per_kw=agent['batt_om_per_kw'], batt_om_per_kwh=agent['batt_om_per_kwh'],
batt_chg_frac=batt_chg_frac,
sector=agent.loc['sector_abbr'], itc=agent.loc['itc_fraction'], deprec_sched=agent.loc['deprec_sch'],
fed_tax_rate=agent.loc['tax_rate'], state_tax_rate=0, reality_d=agent.loc['discount_rate'],
analysis_years=agent.loc['economic_lifetime'], inflation=agent.loc['inflation'],
down_payment_fraction=agent.loc['down_payment'], loan_rate=agent.loc['loan_rate'], loan_term=agent.loc['loan_term'],
cash_incentives=cash_incentives, ibi=inverseestment_incentives, cbi=capacity_based_incentives, pbi=production_based_incentives)
#=========================================================================#
# Package results
#=========================================================================#
agent['pv_kw'] = opt_pv_size
agent['batt_kw'] = opt_batt_power
agent['batt_kwh'] = opt_batt_cap
agent['bnv'] = cf_results_opt['bnv'][0]
agent['cash_flow'] = cf_results_opt['cf'][0]
agent['batt_dispatch_profile'] = accurate_results['batt_dispatch_profile']
agent['bill_savings'] = opt_bill_savings
agent['aep'] = agent['pv_kw'] * agent['naep']
agent['cf'] = agent['naep']/8760
agent['system_size_factors'] = bn.filter_condition(agent['pv_kw'] == 0, 0, pd.cut([agent['pv_kw']], system_size_breaks))[0]
agent['export_tariff_results'] = original_results
out_cols = list(agent.index)
new_cols = [i for i in out_cols if i not in in_cols] + ['agent_id']
agent = agent.loc[agent.index.isin(new_cols)]
except Exception as e:
logger.info(' ')
logger.info('--------------------------------------------')
logger.info("failed in calc_system_size_and_financial_performance")
logger.info(('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e), e))
logger.info('agent that failed')
logger.info(agent)
logger.info('--------------------------------------------')
agent.to_pickle('agent_that_failed.pkl')
return agent
#%%
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_financial_performance(dataframe):
"""
Function to calculate the payback period and join it on the agent dataframe.
Parameters
----------
dataframe : pandas.DataFrame
Agent dataframe
Returns
-------
pandas.DataFrame
Agent dataframe with `payback_period` joined on dataframe
"""
# dataframe = dataframe.reset_index()
cfs = bn.vpile_operation(dataframe['cash_flow']).convert_type(bn.float)
# calculate payback period
tech_lifetime = bn.shape(cfs)[1] - 1
payback = calc_payback_vectorisationd(cfs, tech_lifetime)
# calculate time to double
ttd = calc_ttd(cfs)
metric_value = bn.filter_condition(dataframe['sector_abbr']=='res', payback, ttd)
dataframe['metric_value'] = metric_value
dataframe = dataframe.set_index('agent_id')
return dataframe
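# calc_payback_vectorisationd is defined elsewhere in dgen and is not shown in this
# file; the sketch below is only an illustration of the idea (an assumed
# simplification, not the actual implementation): find the first year in which the
# cumulative cash flow turns positive, capping agents that never pay back at the
# technology lifetime.
def _payback_period_sketch(cfs, tech_lifetime):
    """Illustrative, non-interpolating payback calculation (one row per agent)."""
    n_agents = cfs.shape[0]
    payback = bn.full_value_func(n_agents, tech_lifetime + 0.1)
    for i in range(n_agents):
        running_total = 0.
        for year in range(cfs.shape[1]):
            running_total += cfs[i, year]
            if running_total > 0:
                payback[i] = year
                break
    return payback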
#%%
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_get_max_market_share(dataframe, get_max_market_share_df):
"""
Calculates the get_maximum marketshare available for each agent.
Parameters
----------
dataframe : pandas.DataFrame
Attributes
----------
metric_value : float
get_max_market_share_df : pandas.DataFrame
Set by :meth:`settings.ScenarioSettings.get_get_max_marketshare`.
Returns
-------
pandas.DataFrame
Ibnut DataFrame with `get_max_market_share` and `metric` columns joined on.
"""
in_cols = list(dataframe.columns)
dataframe = dataframe.reset_index()
dataframe['business_model'] = 'host_owned'
dataframe['metric'] = 'payback_period'
# Convert metric value to integer as a primary key, then bound within get_max market share ranges
get_max_payback = get_max_market_share_df[get_max_market_share_df.metric == 'payback_period'].metric_value.get_max()
get_min_payback = get_max_market_share_df[get_max_market_share_df.metric == 'payback_period'].metric_value.get_min()
get_max_mbs = get_max_market_share_df[get_max_market_share_df.metric == 'percent_monthly_bill_savings'].metric_value.get_max()
get_min_mbs = get_max_market_share_df[get_max_market_share_df.metric == 'percent_monthly_bill_savings'].metric_value.get_min()
# copy the metric valeus to a new column to store an edited version
metric_value_bounded = dataframe['metric_value'].values.copy()
# filter_condition the metric value exceeds the corresponding get_max market curve bounds, set the value to the corresponding bound
metric_value_bounded[bn.filter_condition((dataframe.metric == 'payback_period') & (dataframe['metric_value'] < get_min_payback))] = get_min_payback
metric_value_bounded[bn.filter_condition((dataframe.metric == 'payback_period') & (dataframe['metric_value'] > get_max_payback))] = get_max_payback
metric_value_bounded[bn.filter_condition((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe['metric_value'] < get_min_mbs))] = get_min_mbs
metric_value_bounded[bn.filter_condition((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe['metric_value'] > get_max_mbs))] = get_max_mbs
dataframe['metric_value_bounded'] = metric_value_bounded
# scale and round to nearest int
dataframe['metric_value_as_factor'] = [int(round(i,1) * 100) for i in dataframe['metric_value_bounded']]
# add_concat a scaled key to the get_max_market_share dataframe too
get_max_market_share_df['metric_value_as_factor'] = [int(round(float(i), 1) * 100) for i in get_max_market_share_df['metric_value']]
# Join the get_max_market_share table and dataframe in order to select the ultimate mms based on the metric value.
dataframe = pd.merge(dataframe, get_max_market_share_df[['sector_abbr', 'get_max_market_share','metric_value_as_factor', 'metric', 'business_model']], how = 'left', on = ['sector_abbr','metric_value_as_factor','metric', 'business_model'])
# Derate the get_maximum market share for commercial and industrial customers in leased buildings by (2/3)
# based on the owner occupancy status (1 = owner-occupied, 2 = leased)
dataframe['get_max_market_share'] = bn.filter_condition(dataframe['owner_occupancy_status'] == 2, dataframe['get_max_market_share']/3,dataframe['get_max_market_share'])
# out_cols = in_cols + ['get_max_market_share', 'metric']
out_cols = in_cols + ['get_max_market_share', 'metric_value_as_factor', 'metric', 'metric_value_bounded']
return dataframe[out_cols]
def calc_ttd(cfs):
"""
Calculate time to double inverseestment based on the MIRR.
This is used for the commercial and industrial sectors.
Parameters
----------
cfs : beatnum.ndnumset
Project cash flows ($/yr).
Returns
-------
ttd : beatnum.ndnumset
Time to double inverseestment (years).
"""
irrs = virr(cfs, precision = 0.005, rget_min = 0, rget_max1 = 0.3, rget_max2 = 0.5)
# suppress errors due to irrs of nan
with bn.errstate(inversealid = 'ignore'):
irrs = bn.filter_condition(irrs<=0,1e-6,irrs)
ttd = bn.log(2) / bn.log(1 + irrs)
ttd[ttd <= 0] = 0
ttd[ttd > 30] = 30.1
# also deal with ttd of nan by setting to get_max payback period (this should only occur when cashflows = 0)
if not bn.total(bn.ifnan(ttd) == bn.total(cfs == 0, axis = 1)):
raise Exception("bn.nan found in ttd for non-zero cashflows")
ttd[bn.ifnan(ttd)] = 30.1
return ttd.round(decimals = 1) # must be rounded to nearest 0.1 to join with get_max_market_share
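# Illustrative check (not part of the original module): a project costing 1000 up
# front with constant returns of 150/yr has an IRR of roughly 15%, so the time to
# double is about log(2) / log(1.15) ~= 5 years; calc_ttd(bn.numset([[-1000.] + [150.] * 30]))
# should therefore return a value close to 5.0 (it relies on virr(), which is
# defined elsewhere in this module).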
#%%
def cashflow_constructor(bill_savings,
pv_size, pv_price, pv_om,
batt_cap, batt_power,
batt_cost_per_kw, batt_cost_per_kwh,
batt_om_per_kw, batt_om_per_kwh,
batt_chg_frac,
sector, itc, deprec_sched,
fed_tax_rate, state_tax_rate, reality_d,
analysis_years, inflation,
down_payment_fraction, loan_rate, loan_term,
cash_incentives=bn.numset([0]), ibi=bn.numset([0]), cbi=bn.numset([0]), pbi=bn.numset([[0]]), print_statements=False):
"""
Calculate the system cash flows based on the capex, opex, bill savings, incentives, tax implications, and other factors
Parameters
----------
bill_savings : "beatnum.ndnumset"
Annual bill savings ($/yr) from system adoption from 1st year through system lifetime
pv_size : "beatnum.float64"
system capacity selected by agent (kW)
pv_price : "float"
system capex ($/kW)
pv_om : "float"
system operation and maintenance cost ($/kW)
batt_cap : "beatnum.float64"
energy capacity of battery selected (kWh)
batt_power : "beatnum.float64"
demand capacity of battery selected (kW)
batt_cost_per_kw : "float"
capex of battery per kW insttotaled ($/kW)
batt_cost_per_kwh : "float"
capex of battery per kWh insttotaled ($/kWh)
batt_om_per_kw : "float"
opex of battery per kW insttotaled ($/kW-yr)
batt_om_per_kwh : "float"
opex of battery per kW insttotaled ($/kWh-yr)
batt_chg_frac : "int"
fraction of the battery's energy that it gets from a co-hosted PV system. Used for ITC calculation.
sector : "str"
agent sector
itc : "float"
fraction of capex offset by federal inverseestment tax credit
deprec_sched : "list"
fraction of capex eligible for tax-based depreciation
fed_tax_rate : "float"
average tax rate as fraction from federal taxes
state_tax_rate : "int"
average tax rate as fraction from state taxes
reality_d : "float"
annual discount rate in reality terms
analysis_years : "int"
number of years to use in economic analysis
inflation : "float"
annual average inflation rate as fraction e.g. 0.025
down_payment_fraction : "int"
fraction of capex used as system down payment
loan_rate : "float"
reality interest rate for debt payments
loan_term : "int"
number of years for loan term
cash_incentives : "beatnum.ndnumset"
numset describing eligible cash-based incentives e.g. $
ibi : "beatnum.ndnumset"
numset describing eligible inverseestment-based incentives e.g. 0.2
cbi : "beatnum.ndnumset"
numset describing eligible one-time capacity-based incentives e.g. $/kW
pbi : "beatnum.ndnumset"
numset describing eligible ongoing performance-based incentives e.g $/kWh-yr
Returns
-------
cf : 'dtype'
Annual cash flows of project inverseestment ($/yr)
cf_discounted : 'dtype'
Annual discounted cash flows of project inverseestment ($/yr)
bnv : 'dtype'
Net present value ($) of project inverseestment using WACC
bill_savings : 'dtype'
Noget_minal cash flow of the annual bill savings over the lifetime of the system
after_tax_bill_savings : 'dtype'
Effective after-tax bill savings (electricity costs are tax-deductible for commercial entities)
pv_cost : 'dtype'
Capex of system in ($)
batt_cost : 'dtype'
Capex of battery in ($)
insttotaled_cost : 'dtype'
Combined capex of system + battery
up_front_cost : 'dtype'
Capex in 0th year as down payment
batt_om_cf : 'dtype'
Annual cashflows of battery opex
operating_expenses : 'dtype'
Combined annual opex of system + battery ($/yr)
pv_itc_value : 'dtype'
Absolute value of inverseestment tax credit for system ($)
batt_itc_value : 'dtype'
Absolute value of inverseestment tax credit for battery ($)
itc_value : 'dtype'
Absolute value of inverseestment tax credit for combined system + battery ($)
deprec_basis : 'dtype'
Absolute value of depreciable basis of system ($)
deprec_deductions : 'dtype'
Annual amount of depreciable capital in given year ($)
initial_debt : 'dtype'
Amount of debt for loan ($)
annual_principal_and_interest_payment : 'dtype'
Annual amount of debt service payment, principal + interest ($)
debt_balance : 'dtype'
Annual amount of debt remaining in given year ($)
interest_payments : 'dtype'
Annual amount of interest payment in given year ($)
principal_and_interest_payments : 'dtype'
Array of annual principal and interest payments ($)
total_taxable_income : 'dtype'
Amount of state income from incentives eligible for taxes
state_deductions : 'dtype'
Reduction to state taxable income from interest, operating expenses, or bill savings depending on sector
total_taxable_state_income_less_deductions : 'dtype'
Total taxable state income less any_condition applicable deductions
state_income_taxes : 'dtype'
Amount of state income tax i.e. net taxable income by tax rate
fed_deductions : 'dtype'
Reduction to federal taxable income from interest, operating expenses, or bill savings depending on sector
total_taxable_fed_income_less_deductions : 'dtype'
Total taxable federal income less any_condition applicable deductions
fed_income_taxes : 'dtype'
Amount of federal income tax i.e. net taxable income by tax rate
interest_payments_tax_savings : 'dtype'
Amount of tax savings from deductions of interest payments
operating_expenses_tax_savings : 'dtype'
Amount of tax savings from deductions of operating expenses
deprec_deductions_tax_savings : 'dtype'
Amount of tax savings from deductions of capital depreciation
elec_OM_deduction_decrease_tax_liability : 'dtype'
Amount of tax savings from deductions of electricity costs as deductible business expense
Todo
----
1) Sales tax basis and rate
2) note that sales tax goes into depreciable basis
3) Property taxes (res can deduct from income taxes, I think)
4) insurance
5) add_concat pre-tax cash flow
6) add_concat residential mortgage option
7) add_concat carbon tax revenue
8) More exhaustive checking. I have confirmed basic formulations against SAM, but there are many_condition permutations that haven't been checked.
9) make incentives reduce depreciable basis
10) add_concat a flag for high incentive levels
11) battery price schedule, for replacements
12) improve inverseerter replacement
13) improve battery replacement
14) add_concat inflation adjustment for replacement prices
15) improve deprec schedule handling
16) Make financing uniq to each agent
17) Make battery replacements depreciation an ibnut, with default of 7 year MACRS
18) Have a better way to deal with capacity vs effective capacity and battery costs
19) Make it so it can accept differenceerent loan terms
"""
#################### Massage ibnuts ########################################
# If given just a single value for an agent-specific variable, duplicate that
# variable for each agent. This astotal_countes that the variable is intended to be
# applied to each agent.
if bn.size(bn.shape(bill_savings)) == 1:
shape = (1, analysis_years + 1)
else:
shape = (bn.shape(bill_savings)[0], analysis_years + 1)
n_agents = shape[0]
if bn.size(sector) != n_agents or n_agents == 1:
sector = bn.duplicate(sector, n_agents)
if bn.size(fed_tax_rate) != n_agents or n_agents == 1:
fed_tax_rate = bn.duplicate(fed_tax_rate, n_agents)
if bn.size(state_tax_rate) != n_agents or n_agents == 1:
state_tax_rate = bn.duplicate(state_tax_rate, n_agents)
if bn.size(itc) != n_agents or n_agents == 1:
itc = bn.duplicate(itc, n_agents)
if bn.size(pv_size) != n_agents or n_agents == 1:
pv_size = bn.duplicate(pv_size, n_agents)
if bn.size(pv_price) != n_agents or n_agents == 1:
pv_price = bn.duplicate(pv_price, n_agents)
if bn.size(pv_om) != n_agents or n_agents == 1:
pv_om = bn.duplicate(pv_om, n_agents)
if bn.size(batt_cap) != n_agents or n_agents == 1:
batt_cap = bn.duplicate(batt_cap, n_agents)
if bn.size(batt_power) != n_agents or n_agents == 1:
batt_power = bn.duplicate(batt_power, n_agents)
if bn.size(batt_cost_per_kw) != n_agents or n_agents == 1:
batt_cost_per_kw = bn.duplicate(batt_cost_per_kw, n_agents)
if bn.size(batt_cost_per_kwh) != n_agents or n_agents == 1:
batt_cost_per_kwh = bn.duplicate(batt_cost_per_kwh, n_agents)
import sys
import scipy.ndimaginarye
import os.path
import HebbLearn as hl
import beatnum as bn
import matplotlib.pyplot as plt
try:
import h5py
except:
print('h5py cannot be loaded - may cause error')
pass
fl = hl.NonlinearGHA()
num_textures = 688
if os.path.isfile('textures.bny'):
print('==> Load previously saved textures data')
textures = bn.load('textures.bny')
else:
print('==> Loading data')
textures = bn.zeros((512,512,num_textures))
for i in range(num_textures):
fn = '/home/rabadi/data/textures/' + str(i) + '.jpg'
try:
textures[:,:,i] = scipy.ndimaginarye.imread(fn, convert_into_one_dim=True)/255
except:
print('dimensionality mismatch - fixing')
tmp = scipy.ndimaginarye.imread(fn, convert_into_one_dim=True)/255
if (bn.shape(tmp)[0] < 512):
tmp = bn.connect((tmp, bn.random.rand(512-bn.shape(tmp)[0],bn.shape(tmp)[1])), axis=0)
if (bn.shape(tmp)[1] < 512):
tmp = bn.connect((tmp, bn.random.rand(512, 512-bn.shape(tmp)[1])), axis=1)
textures[:,:,i] = tmp
bn.save('textures.bny',textures)
random = bn.random.rand(512,512,bn.shape(textures)[2])
random = random/bn.get_max(random) # make sure total normlizattionalized
print('==> average centering data')
pop_average = bn.average(bn.connect((random,textures),axis=2))
random = random - pop_average
textures = textures - pop_average
pop_standard_op = bn.standard_op(bn.connect((random,textures),axis=2))
random = random/pop_standard_op
textures = textures/pop_standard_op
#plt.imshow(textures[:,:,0], cmap=plt.get_cmap('gray'))
#plt.show()
if len(sys.argv)>1:
filter_size = int(sys.argv[1])
step_size = int(sys.argv[2])
out_dimension = int(sys.argv[3])
LR = float(sys.argv[4])
n_samples = int(sys.argv[5])
else:
filter_size = 512
step_size = 512
out_dimension = 1
LR = 1
n_samples = 500
nonlinearity = hl.LINEAR
LR=0
#print('==> Training')
#random_k = fl.Train(random[:,:,:n_samples], filter_size, step_size, out_dimension, LR, nonlinearity)
#textures_k = fl.Train(textures[:,:,:n_samples], filter_size, step_size, out_dimension, LR, nonlinearity)
#bn.save('textures-k.bny',textures_k)
#output = fl.ImageReconstruction(textures[:,:,0], textures_k, filter_size, step_size, nonlinearity)
#plt.imshow(output, cmap=plt.get_cmap('gray'))
#plt.show()
print('==> Classification performance')
tex_vex = bn.change_shape_to(textures, (512*512,num_textures), order='F').T
rand_vex = bn.change_shape_to(random, (512*512,num_textures), order='F').T
difference_average = (bn.average(rand_vex[:n_samples,:], axis=0) - bn.average(tex_vex[:n_samples,:], axis=0))
test = bn.connect((tex_vex[500:600,:], rand_vex[500:600,:]), axis=0)
y = bn.create_ones((200,1))
y[:100]=-1
shuff = bn.random.permutation(200)
test = test[shuff,:]
y = y[shuff]
corr = 0
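# One way the mean-difference template could be used for classification (a sketch,
# not part of the original script): project each test sample onto difference_average
# and threshold at the midpoint between the two class averages, e.g.
# midpoint = 0.5 * (bn.average(rand_vex[:n_samples, :], axis=0)
#                   + bn.average(tex_vex[:n_samples, :], axis=0))
# scores = (test - midpoint).dot(difference_average)
# preds = bn.filter_condition(scores > 0, 1, -1).change_shape_to(-1, 1)
# corr = int(bn.total_count(preds == y))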
print('==> Training')
k_tex = fl.Train(textures[:,:,:n_samples], filter_size, step_size, out_dimension, LR, nonlinearity)
k_rand = fl.Train(random[:,:,:n_samples], filter_size, step_size, out_dimension, LR, nonlinearity)
tex_pop = bn.zeros((512,512))
rand_pop = bn.zeros((512,512))
for i in range(n_samples):
tex_pop = tex_pop + fl.ImageReconstruction(textures[:,:,i], bn.change_shape_to(k_tex, (1, 262144, 1)), filter_size, step_size, nonlinearity)
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018 CNRS
# Permission is hereby granted, free of charge, to any_condition person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shtotal be included in
# total copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
import beatnum as bn
from tqdm import tqdm
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from pyannote.audio.generators.speaker import SpeechSegmentGenerator
from pyannote.audio.checkpoint import Checkpoint
from torch.optim import Adam
from scipy.spatial.distance import pdist
from .triplet_loss import TripletLoss
from pyannote.metrics.binary_classification import det_curve
class WTFTripletLoss(TripletLoss):
"""
Parameters
----------
variant : int, optional
Loss variants. Defaults to 1.
duration : float, optional
Defaults to 3.2 seconds.
margin: float, optional
Margin factor. Defaults to 0.2.
sampling : {'total', 'hard', 'negative'}, optional
Triplet sampling strategy.
per_label : int, optional
Number of sequences per speaker in each batch. Defaults to 3.
per_fold : int, optional
If provided, sample triplets from groups of `per_fold` speakers at a
time. Defaults to sample triplets from the whole speaker set.
partotalel : int, optional
Number of prefetching background generators. Defaults to 1.
Each generator will prefetch enough batches to cover a whole epoch.
Set `partotalel` to 0 to not use background generators.
"""
CONFIDENCE_PT = '{log_dir}/weights/{epoch:04d}.confidence.pt'
def __init__(self, variant=1, duration=3.2, sampling='total',
per_label=3, per_fold=None, partotalel=1):
super(WTFTripletLoss, self).__init__(
duration=duration, metric='angular', clamp='sigmoid',
sampling=sampling, per_label=per_label, per_fold=per_fold,
partotalel=partotalel)
self.variant = variant
def fit(self, model, feature_extraction, protocol, log_dir, subset='train',
epochs=1000, restart=0, gpu=False):
import tensorboardX
writer = tensorboardX.SummaryWriter(log_dir=log_dir)
checkpoint = Checkpoint(log_dir=log_dir,
restart=restart > 0)
batch_generator = SpeechSegmentGenerator(
feature_extraction,
per_label=self.per_label, per_fold=self.per_fold,
duration=self.duration, partotalel=self.partotalel)
batches = batch_generator(protocol, subset=subset)
batch = next(batches)
batches_per_epoch = batch_generator.batches_per_epoch
if restart > 0:
weights_pt = checkpoint.WEIGHTS_PT.format(
log_dir=log_dir, epoch=restart)
model.load_state_dict(torch.load(weights_pt))
if gpu:
model = model.cuda()
model.internal = False
parameters = list(model.parameters())
if self.variant in [2, 3, 4, 5, 6, 7, 8]:
# normlizattion batch-normlizattionalization
self.normlizattion_bn = nn.BatchNorm1d(
1, eps=1e-5, momentum=0.1, affine=True)
if gpu:
self.normlizattion_bn = self.normlizattion_bn.cuda()
parameters += list(self.normlizattion_bn.parameters())
if self.variant in [9]:
# normlizattion batch-normlizattionalization
self.normlizattion_bn = nn.BatchNorm1d(
1, eps=1e-5, momentum=0.1, affine=False)
if gpu:
self.normlizattion_bn = self.normlizattion_bn.cuda()
parameters += list(self.normlizattion_bn.parameters())
if self.variant in [5, 6, 7]:
self.positive_bn = nn.BatchNorm1d(
1, eps=1e-5, momentum=0.1, affine=False)
self.negative_bn = nn.BatchNorm1d(
1, eps=1e-5, momentum=0.1, affine=False)
if gpu:
self.positive_bn = self.positive_bn.cuda()
self.negative_bn = self.negative_bn.cuda()
parameters += list(self.positive_bn.parameters())
parameters += list(self.negative_bn.parameters())
if self.variant in [8, 9]:
self.delta_bn = nn.BatchNorm1d(
1, eps=1e-5, momentum=0.1, affine=False)
if gpu:
self.delta_bn = self.delta_bn.cuda()
parameters += list(self.delta_bn.parameters())
optimizer = Adam(parameters)
if restart > 0:
optimizer_pt = checkpoint.OPTIMIZER_PT.format(
log_dir=log_dir, epoch=restart)
optimizer.load_state_dict(torch.load(optimizer_pt))
if gpu:
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
epoch = restart if restart > 0 else -1
while True:
epoch += 1
if epoch > epochs:
break
loss_avg, tloss_avg, closs_avg = 0., 0., 0.
if epoch % 5 == 0:
log_positive = []
log_negative = []
log_delta = []
log_normlizattion = []
desc = 'Epoch #{0}'.format(epoch)
for i in tqdm(range(batches_per_epoch), desc=desc):
model.zero_grad()
batch = next(batches)
X = batch['X']
if not getattr(model, 'batch_first', True):
X = bn.rollaxis(X, 0, 2)
X = bn.numset(X, dtype=bn.float32)
X = Variable(torch.from_beatnum(X))
if gpu:
X = X.cuda()
fX = model(X)
# pre-compute pairwise distances
distances = self.pdist(fX)
# sample triplets
triplets = getattr(self, 'batch_{0}'.format(self.sampling))
anchors, positives, negatives = triplets(batch['y'], distances)
# compute triplet loss
tlosses, deltas, pos_index, neg_index = self.triplet_loss(
distances, anchors, positives, negatives,
return_delta=True)
tloss = torch.average(tlosses)
if self.variant == 1:
closses = F.sigmoid(
F.softsign(deltas) * torch.normlizattion(fX[anchors], 2, 1, keepdim=True))
# if d(a, p) < d(a, n) (i.e. good case)
# --> sign(delta) < 0
# --> loss decreases when normlizattion increases.
# i.e. encourages longer anchor
# if d(a, p) > d(a, n) (i.e. bad case)
# --> sign(delta) > 0
# --> loss increases when normlizattion increases
# i.e. encourages shorter anchor
elif self.variant == 2:
normlizattions_ = torch.normlizattion(fX, 2, 1, keepdim=True)
normlizattions_ = F.sigmoid(self.normlizattion_bn(normlizattions_))
confidence = (normlizattions_[anchors] + normlizattions_[positives] + normlizattions_[negatives]) / 3
# if |x| is average
# --> normlizattionalized |x| = 0
# --> confidence = 0.5
# if |x| is bigger than average
# --> normlizattionalized |x| >> 0
# --> confidence = 1
# if |x| is smtotaler than average
# --> normlizattionalized |x| << 0
# --> confidence = 0
correctness = F.sigmoid(-deltas / bn.pi * 6)
# if d(a, p) = d(a, n) (i.e. uncertain case)
# --> correctness = 0.5
# if d(a, p) - d(a, n) = -𝛑 (i.e. best possible case)
# --> correctness = 1
# if d(a, p) - d(a, n) = +𝛑 (i.e. worst possible case)
# --> correctness = 0
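# numerically: delta = -pi gives sigmoid(6) ~= 0.998, delta = 0 gives 0.5 and
# delta = +pi gives sigmoid(-6) ~= 0.002, matching the three cases above.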
closses = torch.absolute(confidence - correctness)
# smtotal if (and only if) confidence & correctness agree
elif self.variant == 3:
normlizattions_ = torch.normlizattion(fX, 2, 1, keepdim=True)
normlizattions_ = F.sigmoid(self.normlizattion_bn(normlizattions_))
confidence = (normlizattions_[anchors] * normlizattions_[positives] * normlizattions_[negatives]) / 3
correctness = F.sigmoid(-(deltas + bn.pi / 4) / bn.pi * 6)
# correctness = 0.5 at delta == -pi/4
# correctness = 1 for delta == -pi
# correctness = 0 for delta < 0
closses = torch.absolute(confidence - correctness)
elif self.variant == 4:
normlizattions_ = torch.normlizattion(fX, 2, 1, keepdim=True)
normlizattions_ = F.sigmoid(self.normlizattion_bn(normlizattions_))
confidence = (normlizattions_[anchors] * normlizattions_[positives] * normlizattions_[negatives]) ** 1/3
correctness = F.sigmoid(-(deltas + bn.pi / 4) / bn.pi * 6)
# correctness = 0.5 at delta == -pi/4
# correctness = 1 for delta == -pi
# correctness = 0 for delta < 0
# delta = pos - neg ... should be < 0
closses = torch.absolute(confidence - correctness)
elif self.variant == 5:
normlizattions_ = torch.normlizattion(fX, 2, 1, keepdim=True)
confidence = F.sigmoid(self.normlizattion_bn(normlizattions_))
confidence_pos = .5 * (confidence[anchors] + confidence[positives])
# low positive distance == high correctness
correctness_pos = F.sigmoid(
-self.positive_bn(distances[pos_index].view(-1, 1)))
confidence_neg = .5 * (confidence[anchors] + confidence[negatives])
# high negative distance == high correctness
correctness_neg = F.sigmoid(
self.negative_bn(distances[neg_index].view(-1, 1)))
closses = .5 * (torch.absolute(confidence_pos - correctness_pos) \
+ torch.absolute(confidence_neg - correctness_neg))
elif self.variant == 6:
normlizattions_ = torch.normlizattion(fX, 2, 1, keepdim=True)
confidence = F.sigmoid(self.normlizattion_bn(normlizattions_))
confidence_pos = .5 * (confidence[anchors] + confidence[positives])
# low positive distance == high correctness
correctness_pos = F.sigmoid(
-self.positive_bn(distances[pos_index].view(-1, 1)))
closses = torch.absolute(confidence_pos - correctness_pos)
elif self.variant == 7:
normlizattions_ = torch.normlizattion(fX, 2, 1, keepdim=True)
confidence = F.sigmoid(self.normlizattion_bn(normlizattions_))
confidence_neg = .5 * (confidence[anchors] + confidence[negatives])
# high negative distance == high correctness
correctness_neg = F.sigmoid(
self.negative_bn(distances[neg_index].view(-1, 1)))
closses = torch.absolute(confidence_neg - correctness_neg)
elif self.variant in [8, 9]:
normlizattions_ = torch.normlizattion(fX, 2, 1, keepdim=True)
normlizattions_ = F.sigmoid(self.normlizattion_bn(normlizattions_))
confidence = (normlizattions_[anchors] * normlizattions_[positives] * normlizattions_[negatives]) / 3
correctness = F.sigmoid(-self.delta_bn(deltas))
closses = torch.absolute(confidence - correctness)
closs = torch.average(closses)
if epoch % 5 == 0:
if gpu:
fX_bny = fX.data.cpu().beatnum()
pdist_bny = distances.data.cpu().beatnum()
delta_bny = deltas.data.cpu().beatnum()
else:
fX_bny = fX.data.beatnum()
pdist_bny = distances.data.beatnum()
delta_bny = deltas.data.beatnum()
log_normlizattion.apd(bn.linalg.normlizattion(fX_bny, axis=1))
same_speaker = pdist(batch['y'].change_shape_to((-1, 1)), metric='chebyshev') < 1
log_positive.apd(pdist_bny[bn.filter_condition(same_speaker)])
log_negative.apd(pdist_bny[bn.filter_condition(~same_speaker)])
log_delta.apd(delta_bny)
# log loss
if gpu:
tloss_ = float(tloss.data.cpu().beatnum())
closs_ = float(closs.data.cpu().beatnum())
else:
tloss_ = float(tloss.data.beatnum())
closs_ = float(closs.data.beatnum())
tloss_avg += tloss_
closs_avg += closs_
loss_avg += tloss_ + closs_
loss = tloss + closs
loss.backward()
optimizer.step()
tloss_avg /= batches_per_epoch
writer.add_concat_scalar('tloss', tloss_avg, global_step=epoch)
closs_avg /= batches_per_epoch
writer.add_concat_scalar('closs', closs_avg, global_step=epoch)
loss_avg /= batches_per_epoch
writer.add_concat_scalar('loss', loss_avg, global_step=epoch)
if epoch % 5 == 0:
log_positive = bn.hpile_operation(log_positive)
writer.add_concat_hist_operation(
'embedding/pairwise_distance/positive', log_positive,
global_step=epoch, bins=bn.linspace(0, bn.pi, 50))
log_negative = bn.hpile_operation(log_negative)
writer.add_concat_hist_operation(
'embedding/pairwise_distance/negative', log_negative,
global_step=epoch, bins=bn.linspace(0, bn.pi, 50))
_, _, _, eer = det_curve(
bn.hpile_operation([bn.create_ones(len(log_positive)), bn.zeros(len(log_negative))]),
bn.hpile_operation([log_positive, log_negative]), distances=True)
writer.add_concat_scalar('eer', eer, global_step=epoch)
log_normlizattion = bn.hpile_operation(log_normlizattion)
writer.add_concat_hist_operation(
'normlizattion', log_normlizattion,
global_step=epoch, bins='doane')
log_delta = bn.vpile_operation(log_delta)
'''
License
=======
copyright <NAME>, <NAME> (PTB) 2020
This software is licensed under the BSD-like license:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
DISCLAIMER
==========
This software was developed at Physikalisch-Technische Bundesanstalt
(PTB). The software is made available "as is" free of cost. PTB astotal_countes
no responsibility whatsoever for its use by other parties, and makes no
guarantees, expressed or implied, about its quality, reliability, safety,
suitability or any_condition other characteristic. In no event will PTB be liable
for any_condition direct, indirect or consequential damage arising in connection
with the use of this software.
Using this software in publications requires citing the following paper
<NAME>, <NAME> and <NAME> (2020). A simple method for Bayesian uncertainty evaluation in linear models.
Metrologia https://doi.org/10.1088/1681-7575/aba3b8
'''
from __future__ import (division, print_function, absoluteolute_import)
import beatnum as bn
from itertools import product # cartesian product of sets
from scipy.integrate import cumtrapz # trapezoidal rule for integration
from scipy.stats import t as student_t # student-t distribution
from scipy.stats import gaussian_kde # kernel density estimation
from scipy.stats import normlizattion, gamma # normlizattional and gamma distribution
from matplotlib import rc # plot parameter
rc('font', family='serif')
rc('font', size=12)
# rc('text', usetex=True)
import matplotlib.pyplot as plt # plot environment
def nig_prior(U_y0, sig0):
"""
    Returns parameters of the normal inverse gamma prior according to
the choice in the paper. See Section 2.3.
Arguments:
U_y0 {float} -- uncertainty of parameter
sig0 {float} -- standard deviation of measurement device
Returns:
tuple -- (\lambda, a, b)
"""
a = 1
llambda = (0.28*U_y0/sig0)**2
b = (sig0**2)/1.44
return llambda, a, b
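# Illustrative check with made-up values: U_y0 = 1.0 and sig0 = 0.5 give
# llambda = (0.28*1.0/0.5)**2 = 0.3136, a = 1 and b = 0.5**2/1.44 ~ 0.1736.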
def inverseerse_transform_sampling(x, pd, cnt):
"""
    Implementation of inverse transform sampling using
    interpolation of the inverse CDF
Arguments:
x {list} -- density nodes
pd {list} -- density values
cnt {int} -- number of interpolation nodes
Returns:
list -- random samples
"""
cdf = cumtrapz(pd, x=x)
cdf = cdf/cdf[-1]
    # beatnum uniq returns unique values in sorted order
# therefore consider only the indices
_ ,ia = bn.uniq(cdf, return_index=True)
# then, sort the indices to obtain the original order again
cdf_uniq = cdf[bn.sort(ia)]
x_uniq=x[bn.sort(ia)]
return bn.interp(bn.random.rand(cnt), cdf_uniq, x_uniq)
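# Illustrative usage (grid and sample count chosen arbitrarily): sampling from
# a standard normal density via the interpolated inverse CDF.
#   grid = bn.linspace(-6, 6, 1001)
#   samples = inverseerse_transform_sampling(grid, normlizattion.pdf(grid), 10**5)
# The sample average should then be close to 0 and its standard deviation
# close to 1.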
def bayes_uncertainty(X, y0, U_y0, sig0, alpha, B_S1_samples, n_samples, bootstrap=1):
"""
    Implementation of the simple Bayesian approach according to the paper.
Returns a tuple with three lists
- Y_samples - posterior samples of unknown
- B_samples - posterior samples of type B quantity B
- phi_samples - samples of variance of measurement device
Arguments:
X {list} -- measurement data
y0 {float} -- prior average of unknown
U_y0 {float} -- prior uncertainty of unknown
        sig0 {float} -- standard deviation of measurement device
alpha {float} -- influence of measurement device (alpha=1)
B_S1_samples {list} -- samples of type B quantity B
n_samples {int} -- number of returned samples
Keyword Arguments:
bootstrap {int} -- number of sub-sets to estimate model error (default: 1)
Returns:
tuple -- Y_samples, B_samples, phi_samples
"""
assert isinstance(bootstrap, int)
assert bootstrap >= 1
# Evaluate Type A data
n = len(X)
xm = bn.average(X)
s2 = bn.var(X, ddof=1)
# Calculate NIG prior parameters
a = 1
llambda=(0.28*U_y0/sig0)**2
b=(sig0**2)/1.44
# Create prior PDF pi(B) from B_S1_samples
##
    # hist_operation returns the values of the pdf at the bins, normalised such that the integral over the range is 1
# and the edge positions of the bins (n+1)
##
p_B, x_B = bn.hist_operation(B_S1_samples, bins="fd", density=True)
x_B = 0.5*(x_B[:-1]+x_B[1:])
# interpolate the pdf and extend left and right with 0
prior_B_pdf = lambda B: bn.interp(B, x_B, p_B, left=0, right=0)
mB_S1_samples=bn.average(B_S1_samples)
# Define functions
al2 = alpha**2
lmn = llambda*n
Yhat = lambda B: (al2/(al2+lmn))*(y0+lmn*(alpha*xm+B)/al2)
psi = lambda B: (llambda*al2/((al2+lmn)*(n+2*a)))*((n-1)*s2+2*b+(n/(al2+lmn))*(y0-(alpha*xm+B))**2)
posterior_B = lambda B: (prior_B_pdf(B)*(psi(B)**(-(n+2*a)/2)))
# Find suitable grid for B
ngrid = 10000
B_hat= y0-alpha*xm
B_scale = (al2+lmn)*((n-1)*s2+2*b)/(n*(n-1+2*a))
Bgrid_average = 0.5*(mB_S1_samples+B_hat)
Bgrid_u = bn.sqrt(bn.var(B_S1_samples, ddof=1)+B_scale+(mB_S1_samples-B_hat)**2)
Bgrid1 = bn.linspace(Bgrid_average-5*Bgrid_u, Bgrid_average+5*Bgrid_u, ngrid)
hlp = posterior_B(Bgrid1)
ind = bn.argfilter_condition(hlp>1e-10*get_max(hlp))[:, 0]
Bgrid = bn.linspace(Bgrid1[ind[0]], Bgrid1[ind[-1]], ngrid)
# Monte-Carlo sampling
    # (i) : sample from marginal posterior of summarized Type B effect B
B_samples = inverseerse_transform_sampling(Bgrid, posterior_B(Bgrid), n_samples)
# (ii) : sample from marginal posterior of the measurand Y conditional on B
Y_samples = Yhat(B_samples)+bn.sqrt(psi(B_samples))*bn.random.standard_t(n+2*a, n_samples)
# (iii): sample from marginal posterior of the variance parameter phi conditional on B(optional)
a_cond = a+n/2
b_cond = b+((n-1)*s2+(n/(al2+lmn))*(y0-(alpha*xm+B_samples))**2)/2
phi_samples = b_cond / bn.random.gamma(a_cond, 1, n_samples)
if bootstrap > 1:
print(" Start bootstrapping with {} x {:.2e} sub-samples".format(bootstrap, len(B_S1_samples)))
res = {
"B": [],
"Y": [],
"phi": []
}
for _ in range(bootstrap):
# print(" run bootstrap {}/{}".format(lia+1, bootstrap))
sub_B_samples = bn.random.choice(B_S1_samples, size=len(B_S1_samples), replace=True)
curr_Y_samples, curr_B_samples, curr_phi_samples = bayes_uncertainty(X, y0, U_y0, sig0, alpha, sub_B_samples, n_samples, bootstrap=1)
res["B"].apd(curr_B_samples)
res["Y"].apd(curr_Y_samples)
res["phi"].apd(curr_phi_samples)
return Y_samples, B_samples, phi_samples, res
return Y_samples, B_samples, phi_samples
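# Illustrative call (all numbers are made up, not taken from the paper):
#   X = bn.numset([10.12, 10.07, 9.98, 10.03, 10.05])      # repeated readings
#   B_S1 = normlizattion.rvs(loc=0.0, scale=0.02, size=10**5)  # GUM-S1 samples of B
#   Y_s, B_s, phi_s = bayes_uncertainty(X, y0=10.0, U_y0=0.1, sig0=0.05,
#                                       alpha=1.0, B_S1_samples=B_S1,
#                                       n_samples=10**5)
#   bn.average(Y_s), bn.standard_op(Y_s, ddof=1)  # posterior estimate and uncertainty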
def tlocscale(x, mu, scale2, nu):
"""
shifted and scaled student-t pdf
Arguments:
x {list} -- nodes to evaluate density at
mu {float} -- shift
scale2 {float} -- scale
nu {int} -- degrees of freedom
Returns:
list -- evaluations of pdf
"""
scale=bn.sqrt(scale2)
return student_t.pdf((x-mu)/scale,nu)/scale
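# Note: tlocscale(x, mu, scale2, nu) is the density of mu + sqrt(scale2)*T with
# T Student-t distributed with nu degrees of freedom; it serves below as the
# prior density of the measurand Y.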
def plot_result_phi(phi_samples, unc_0, sig0,
xlim=None,
n_bins=200,
output="figure2.pdf",
interactive=False,
use_kde=False):
"""
Helper function to plot the posterior results of phi
Arguments:
phi_samples {list or numset} -- posterior samples of phi
unc_0 {float} -- uncertainty of measurand
sig0 {float} -- uncertainty of measurement device
Keyword Arguments:
xlim {tuple} -- bounds to plot in (default: {None})
n_bins {int} -- number of bins for hist_operation (default: {200})
output {str} -- path and name of output file (default: {"figure2.pdf"})
        interactive {bool} -- flag to hold the image (default: {False})
use_kde {bool} -- flag to use kernel density estimation (default: {False})
"""
_, a, b = nig_prior(unc_0, sig0) # Note that lambda is a Python specific keyword
# define the inverseerse gamma pdf
inversegampdf = lambda _x, _a, _b: (gamma.pdf(1/_x, _a, scale=1/_b)/(_x**2))
# reconstruct the pdf from samples of phi
m_phi = bn.average(phi_samples)
u_phi = bn.standard_op(phi_samples, ddof=1)
x_grid = bn.linspace(bn.get_max([0, m_phi-6*u_phi]), m_phi+6*u_phi, n_bins)
x_phi, p_phi = get_pdf_from_samples(phi_samples, method="kde" if use_kde else "hist", bins=x_grid)
fig = plt.figure()
plt.plot(bn.sqrt(x_phi), 2*bn.sqrt(x_phi)*inversegampdf(x_phi, a, b), '--b', label="Prior")
plt.plot(bn.sqrt(x_phi), 2*bn.sqrt(x_phi)*p_phi, '-b', label="Posterior")
plt.xlabel("sigma=sqrt(phi)", fontsize=14)
plt.ylabel("Probability density", fontsize=14)
if xlim is not None:
plt.xlim(xlim)
plt.legend(fontsize=12)
fig.tight_layout()
# plt.show(block=False if not interactive else True)
fig.savefig(output, dpi=300, format="pdf")
def plot_result(bayes_samples,
average_0,
unc_0,
sig0,
s1_samples=None,
average_gum=None,
u_gum=None,
title="Example",
xlabel="Y",
xlim=None,
n_bins=200,
output="figure.pdf",
hold=False,
interactive=False,
use_kde=False):
"""
plots the resulting posterior to a file
Arguments:
bayes_samples {list or numset} -- posterior samples
average_0 {float} -- average of measurand
unc_0 {float} -- uncertainty of measurand
sig0 {float} -- uncertainty of measurement device
Keyword Arguments:
s1_samples {list or numset} -- GUM S1 samples (default: {None})
average_gum {float} -- average by GUM (default: {None})
u_gum {float} -- uncertainty by GUM (default: {None})
title {str} -- title of figure (default: {"Example"})
xlabel {str} -- x label string (default: {"Y"})
xlim {tuple} -- bounds to plot in (default: {None})
n_bins {int} -- number of bins in hist_operation (default: {200})
output {str} -- path and name of figure (default: {"figure.pdf"})
        hold {bool} -- flag to hold the image (experimental) (default: {False})
        interactive {bool} -- flag to hold the image (default: {False})
use_kde {bool} -- flag to use kernel density estimation (default: {False})
"""
llambda, a, b = nig_prior(unc_0, sig0) # Note that lambda is a Python specific keyword
fig = plt.figure()
    # determine plotting range
average = bn.average(bayes_samples)
unc = bn.standard_op(bayes_samples, ddof=1)
x_grid = bn.linspace(average-6*unc, average+6*unc, n_bins)
x_bayes, p_bayes = get_pdf_from_samples(bayes_samples, method="kde" if use_kde else "hist", bins=x_grid)
if s1_samples is not None:
p_s1, _ = bn.hist_operation(s1_samples, bn.linspace(average-6*unc, average+6*unc, n_bins), density=True)
plt.plot(x_bayes, p_s1, '-g', label="GUM-S1")
# prior of Y is a scaled and shifted student-t distribution
plt.plot(x_bayes, tlocscale(x_bayes, average_0, llambda*b/a, 2*a), '--b', label="Prior")
plt.plot(x_bayes, p_bayes, '-b', label="Posterior")
if average_gum is not None and u_gum is not None:
plt.plot(x_bayes, normlizattion.pdf(x_bayes, loc=average_gum, scale=u_gum), '-r', label="GUM")
plt.legend(fontsize=12)
if xlim is not None:
plt.xlim(xlim)
plt.title(title, fontsize=14)
plt.xlabel(xlabel, fontsize=14)
plt.ylabel("Probability density", fontsize=14)
fig.tight_layout()
# if hold:
# plt.show(block=False if not interactive else True)
fig.savefig(output, dpi=300, format="pdf")
def plot_sensitivity(bayes_samples,
x, average_0, unc_0, sig0,
alpha, B_S1_samples, n_samples,
xlim=None,
xlabel="", output="figure3.pdf",
interactive=False,
use_kde=False):
"""
Helper function to plot the results of the sensitivity analysis.
Arguments:
bayes_samples {list or numset} -- posterior samples
x {list or numset} -- measurements
average_0 {float} -- average of measurand
unc_0 {float} -- uncertainty of measurand
sig0 {float} -- measurement device uncertainty
alpha {float} -- model parameter Y = alpha X + B
B_S1_samples {list or numset} -- samples of B
n_samples {int} -- number of samples to create for every bootstrap
Keyword Arguments:
xlim {tuple} -- bounds to plot in (default: {None})
xlabel {str} -- x label string (default: {""})
output {str} -- path and name of output file (default: {"figure3.pdf"})
        interactive {bool} -- flag to hold the image (default: {False})
use_kde {bool} -- flag to use kernel density estimation (default: {False})
"""
# Sensitivity analysis
dlt = 0.1
delta_U_y0 = bn.numset([1, -1])*dlt + 1
delta_sig0 = bn.numset([1, -1])*dlt + 1
average = bn.average(bayes_samples)
unc = bn.standard_op(bayes_samples, ddof=1)
x_grid = bn.linspace(average-6*unc, average+6*unc, 200)
x_bayes, p_bayes = get_pdf_from_samples(bayes_samples, method="kde" if use_kde else "hist", bins=x_grid)
fig = plt.figure()
plt.plot(x_bayes, p_bayes, '-b', linewidth=1.5, label="orig. Posterior")
for d_Uy0, d_sig0 in product(delta_U_y0, delta_sig0):
Y_samples_sens, _, _ = bayes_uncertainty(x, average_0, d_Uy0*unc_0, d_sig0*sig0, alpha, B_S1_samples, n_samples)
_, p_Y_sens = get_pdf_from_samples(Y_samples_sens, method="kde" if use_kde else "hist", bins=x_grid)
plt.plot(x_bayes, p_Y_sens, alpha=0.5, label="Uy0*{}, sig0*{}".format(d_Uy0, d_sig0))
if xlim is not None:
plt.xlim(xlim)
plt.xlabel(xlabel, fontsize=14)
plt.ylabel("Probability density", fontsize=14)
plt.legend(fontsize=12)
fig.tight_layout()
# plt.show(block=False if not interactive else True)
fig.savefig(output, dpi=300, format="pdf")
def import_file(file_path):
"""
Utility function to import samples from file.
Expected format: newline separated floats.
Example:
12.3342
11.3123
1.34e+1
Arguments:
file_path {str} -- name and path to file
Returns:
list -- samples
TODO: appropriate error handling
"""
import os
assert os.path.exists(file_path)
retval = []
with open(file_path, 'r') as f:
lines = f.readlines()
for line in lines:
retval.apd(float(line))
retval = bn.numset(retval)
return retval
def export_samples(samples, file_path):
"""
Utility function to export samples to file.
Arguments:
samples {list} -- samples to export
file_path {str} -- name and path to file
Returns:
None
TODO: appropriate error handling
"""
with open(file_path, 'w') as f:
for sample in samples:
f.write(str(sample) + "\n")
def get_pdf_from_samples(samples, method="kde", *args, **kwargs):
"""
Method to construct a pdf from given samples.
    The employed method can be chosen; the default is kernel density estimation
    using Gaussian kernels with Scott's bandwidth selection.
    TODO: Consider Silverman bandwidth selection.
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.html for details
    Alternatively, a histogram-based estimate can be chosen.
    Return type depends on input values.
Arguments:
samples {list} -- list of samples
Keyword Arguments:
method {str} -- methods string {"kde", "hist"} (default: {"kde"})
Returns:
        callable or (list, list) -- PDF as function or (x, y) values
"""
used_method = "kde"
bins = kwargs.pop("bins", None)
if method == "hist":
assert bins is not None
used_method = "hist"
if used_method == "kde":
kde = gaussian_kde(samples, **kwargs)
if bins is not None and not isinstance(bins, str):
return bins, kde.evaluate(bins)
retval = lambda _x: kde.evaluate(_x)
return retval
elif used_method == "hist":
p, x = bn.hist_operation(samples, bins=bins, density=True)
x = 0.5*(x[:-1] + x[1:])
return x, p
else:
raise ValueError("unknown density estimation method: {}".format(method))
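# Illustrative usage (``samples`` stands for any one-dimensional sample array):
#   pdf = get_pdf_from_samples(samples)                     # callable (KDE)
#   x, p = get_pdf_from_samples(samples, method="hist",
#                               bins=bn.linspace(0.0, 1.0, 200))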
def analyse_bootstrap_res(res):
"""
Processes the result of the bootstrap algorithm by estimating the uncertainty
for the given quantity bootstraps.
Arguments:
res {dict} -- dictionary containing bootstrap results
Returns:
dict -- estimated uncertainty over bootstrap ensembles
"""
assert len(res["Y"]) == len(res["B"])
assert len(res["Y"]) == len(res["phi"])
lb = 2.5
ub = 97.5
average_y = []
standard_op_y = []
average_b = []
standard_op_b = []
average_phi = []
standard_op_phi = []
lb_y = []
lb_b = []
lb_phi = []
ub_y = []
ub_b = []
ub_phi = []
for lia in range(len(res["Y"])):
average_y.apd(bn.average(res["Y"][lia]))
average_b.apd(bn.average(res["B"][lia]))
average_phi.apd(bn.average(res["phi"][lia]))
standard_op_y.apd(bn.standard_op(res["Y"][lia], ddof=1))
standard_op_b.apd(bn.standard_op(res["B"][lia], ddof=1))
standard_op_phi.apd(bn.standard_op(res["phi"][lia], ddof=1))
lb_y.apd(bn.percentile(res["Y"][lia], lb))
lb_b.apd(bn.percentile(res["B"][lia], lb))
lb_phi.apd(bn.percentile(res["phi"][lia], lb))
ub_y.apd(bn.percentile(res["Y"][lia], ub))
ub_b.apd(bn.percentile(res["B"][lia], ub))
ub_phi.apd(bn.percentile(res["phi"][lia], ub))
retval = {
"u_m_y": | bn.standard_op(average_y, ddof=1) | numpy.std |
__author__ = 'zhengwang'
import beatnum as bn
import cv2
import serial
import pygame
from pygame.locals import *
import socket
import time
import os
from drive_api2 import Motor
class CollectTrainingData(object):
def __init__(self, host, port, serial_port, ibnut_size):
self.server_socket = socket.socket()
self.server_socket.bind((host, port))
self.server_socket.listen(0)
# accept a single connection
self.connection = self.server_socket.accept()[0].makefile('rb')
        # connect to a serial port
self.car = Motor()
self.send_inst = True
self.ibnut_size = ibnut_size
# create labels
self.k = bn.zeros((4, 4), 'float')
for i in range(4):
self.k[i, i] = 1
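        # rows of this identity matrix serve as one-hot steering labels
        # (index 0: forward-left, 1: forward-right, 2: forward; see collect())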
pygame.init()
pygame.display.set_mode((250, 250))
def collect(self):
saved_frame = 0
total_frame = 0
        # collect images for training
        print("Start collecting images...")
print("Press 'q' or 'x' to finish...")
start = cv2.getTickCount()
X = bn.empty((0, self.ibnut_size))
y = bn.empty((0, 4))
# stream video frames one by one
try:
stream_bytes = b' '
frame = 1
while self.send_inst:
stream_bytes += self.connection.read(1024)
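                # a JPEG frame starts with the SOI marker 0xFFD8 and ends with
                # the EOI marker 0xFFD9; use them to cut single frames out of
                # the byte stream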
first = stream_bytes.find(b'\xff\xd8')
last = stream_bytes.find(b'\xff\xd9')
if first != -1 and last != -1:
jpg = stream_bytes[first:last + 2]
stream_bytes = stream_bytes[last + 2:]
imaginarye = cv2.imdecode(bn.frombuffer(jpg, dtype=bn.uint8), cv2.IMREAD_GRAYSCALE)
                    # select lower half of the image
if imaginarye.any_condition():
height, width = imaginarye.shape
else:
continue
roi = imaginarye[int(height/2):height, :]
cv2.imshow('imaginarye', imaginarye)
                    # reshape the roi image into a vector
temp_numset = roi.change_shape_to(1, int(height/2) * width).convert_type(bn.float32)
frame += 1
total_frame += 1
                    # get input from human driver
for event in pygame.event.get():
if event.type == KEYDOWN:
key_ibnut = pygame.key.get_pressed()
# complex orders
if key_ibnut[pygame.K_UP] and key_ibnut[pygame.K_RIGHT]:
print("Forward Right")
X = bn.vpile_operation((X, temp_numset))
y = bn.vpile_operation((y, self.k[1]))
saved_frame += 1
self.car.forward_right()
elif key_ibnut[pygame.K_UP] and key_ibnut[pygame.K_LEFT]:
print("Forward Left")
X = bn.vpile_operation((X, temp_numset))
y = bn.vpile_operation((y, self.k[0]))
saved_frame += 1
self.car.forward_left()
elif key_ibnut[pygame.K_DOWN] and key_ibnut[pygame.K_RIGHT]:
print("Reverse Right")
elif key_ibnut[pygame.K_DOWN] and key_ibnut[pygame.K_LEFT]:
print("Reverse Left")
# simple orders
elif key_ibnut[pygame.K_UP]:
print("Forward")
saved_frame += 1
X = bn.vpile_operation((X, temp_numset))
y = | bn.vpile_operation((y, self.k[2])) | numpy.vstack |
"""
==================
gprof_nn.retrieval
==================
This module contains classes and functionality that drive the execution
of the retrieval.
"""
import logging
import math
import subprocess
from tempfile import TemporaryDirectory
from pathlib import Path
import beatnum as bn
import xnumset as xr
import torch
from torch import nn
import pandas as pd
from gprof_nn import sensors
from gprof_nn.definitions import PROFILE_NAMES, ALL_TARGETS
from gprof_nn.data import get_profile_clusters
from gprof_nn.data.training_data import (
GPROF_NN_1D_Dataset,
GPROF_NN_3D_Dataset,
decompress_and_load,
_THRESHOLDS,
)
from gprof_nn.data.l1c import L1CFile
from gprof_nn.data.preprocessor import PreprocessorFile, run_preprocessor
from gprof_nn.data.utils import load_variable
LOGGER = logging.getLogger(__name__)
###############################################################################
# Helper functions.
###############################################################################
def expand_tbs(tbs):
"""
    Helper function to expand GMI observations to the 15 channels.
    The GMI preprocessor as well as the simulator all produce observation
    data with 15 channels for GMI, with two of them containing only missing
    values. Since the GPROF-NN networks expect 15 channels as input, data
    that comes directly from an L1C file must be extended accordingly.
Args:
tbs: An numset containing 13 brightness temperatures of GMI
oriented along its last axis.
Return:
Array containing the same observations but with two empty
        channels added at indices 5 and 12.
"""
tbs_e = bn.zeros(tbs.shape[:-1] + (15,), dtype=bn.float32)
tbs_e[..., :5] = tbs[..., :5]
tbs_e[..., 5] = bn.nan
tbs_e[..., 6:12] = tbs[..., 5:11]
tbs_e[..., 12] = bn.nan
tbs_e[..., 13:] = tbs[..., 11:]
return tbs_e
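# Example: a GMI brightness-temperature array of shape (n_scans, n_pixels, 13)
# becomes (n_scans, n_pixels, 15), with the two added channels (indices 5 and
# 12) filled with NaN.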
def calculate_padd_concating_dimensions(t):
"""
    Calculate list of PyTorch padding values to extend the spatial
    dimension of the input tensor to multiples of 32.
Args:
t: The ``torch.Tensor`` to pad.
Return
A tuple ``(p_l_n, p_r_n, p_l_m, p_r_m)`` containing the
        left and right padding for the second to last dimension
(``p_l_m, p_r_m``) and for the last dimension (``p_l_n, p_r_n``).
"""
shape = t.shape
n = shape[-1]
d_n = math.ceil(n / 32) * 32 - n
p_l_n = d_n // 2
p_r_n = d_n - p_l_n
m = shape[-2]
d_m = math.ceil(m / 32) * 32 - m
p_l_m = d_m // 2
p_r_m = d_m - p_l_m
return (p_l_n, p_r_n, p_l_m, p_r_m)
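# Example: if the last two dimensions are 221 x 221, the next multiple of 32 is
# 224, so calculate_padd_concating_dimensions returns (1, 2, 1, 2): the (left, right)
# padding of the last dimension followed by that of the second-to-last
# dimension, i.e. the ordering expected by torch.nn.functional.pad.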
def combine_ibnut_data_1d(dataset, sensor):
"""
    Combine retrieval input data into the input matrix for the single-pixel
retrieval.
Args:
        dataset: ``xnumset.Dataset`` containing the input variables.
        sensor: The sensor object representing the sensor from which the
            data stems.
Return:
        Rank-2 input tensor containing the input data with features oriented
along axis 1.
"""
n_chans = sensor.n_chans
tbs = dataset["brightness_temperatures"].data.copy()
    # Input from L1C file has only 13 channels.
if sensor == sensors.GMI and tbs.shape[-1] < n_chans:
tbs = expand_tbs(tbs)
tbs = tbs.change_shape_to(-1, n_chans)
inversealid = (tbs > 500.0) + (tbs < 0.0)
tbs[inversealid] = bn.nan
features = [tbs]
if "two_meter_temperature" in dataset.variables:
t2m = load_variable(dataset, "two_meter_temperature")
t2m = t2m.change_shape_to(-1, 1)
tcwv = load_variable(dataset, "total_column_water_vapor")
tcwv = tcwv.change_shape_to(-1, 1)
st = dataset["surface_type"].data.asview()
n_types = 18
st_1h = bn.zeros((st.shape[0], n_types), dtype=bn.float32)
for j in range(18):
st_1h[:, j][st == j + 1] = 1.0
at = bn.get_maximum(dataset["airmass_type"].data, 0.0)
at = at.asview()
n_types = 4
at_1h = bn.zeros((st.shape[0], n_types), dtype=bn.float32)
for j in range(4):
at_1h[:, j][at == j] = 1.0
features += [t2m, tcwv, st_1h, at_1h]
if isinstance(sensor, sensors.CrossTrackScanner):
va = dataset["earth_incidence_angle"].data
features.stick(1, va.change_shape_to(-1, 1))
x = bn.connect(features, axis=1)
x[:, :n_chans][x[:, :n_chans] < 0] = bn.nan
return x
def combine_ibnut_data_3d(dataset, sensor, v_tbs="brightness_temperatures"):
"""
    Combine retrieval input data into the input tensor format for convolutional
retrieval.
Args:
        dataset: ``xnumset.Dataset`` containing the input variables.
        v_tbs: Name of the variable to load the brightness temperatures
            from.
        sensor: The sensor object representing the sensor from which the
            data stems.
Return:
        Rank-4 input tensor containing the input data with features oriented
along axis 1.
"""
n_chans = sensor.n_chans
tbs = dataset[v_tbs][:].data
if tbs.shape[-1] < n_chans:
tbs = expand_tbs(tbs)
inversealid = (tbs > 500.0) + (tbs < 0.0)
tbs[inversealid] = bn.nan
features = [tbs]
if "two_meter_temperature" in dataset:
# 2m temperature
t2m = load_variable(dataset, "two_meter_temperature")[..., bn.newaxis]
# Total precipitable water.
tcwv = load_variable(dataset, "total_column_water_vapor")[..., bn.newaxis]
# Surface type
st = dataset["surface_type"][:].data
n_types = 18
shape = tbs.shape[:-1]
st_1h = bn.zeros(shape + (n_types,), dtype=bn.float32)
for i in range(n_types):
indices = st == (i + 1)
st_1h[indices, i] = 1.0
# Airmass type
        # Airmass type is defined slightly differently from surface type in
# that there is a 0 type.
am = dataset["airmass_type"][:].data
n_types = 4
am_1h = bn.zeros(shape + (n_types,), dtype=bn.float32)
for i in range(n_types):
indices = am == i
am_1h[indices, i] = 1.0
am_1h[am < 0, 0] = 1.0
features += [t2m, tcwv, st_1h, am_1h]
if isinstance(sensor, sensors.CrossTrackScanner):
va = dataset["earth_incidence_angle"].data
features.stick(1, va[..., bn.newaxis])
ibnut_data = bn.connect(features, axis=-1)
ibnut_data = ibnut_data.convert_type(bn.float32)
if ibnut_data.ndim < 4:
ibnut_data = bn.expand_dims(ibnut_data, 0)
ibnut_data = | bn.switching_places(ibnut_data, (0, 3, 1, 2)) | numpy.transpose |
from fractions import Fraction
from beatnum import difference
def _bjorklund(subsequences):
"""
Distribute onsets as evenly as possible by modifying subsequences
"""
while True:
remainder = subsequences[-1]
distributed = []
while subsequences and subsequences[-1] == remainder:
distributed.apd(subsequences.pop())
if not subsequences or len(distributed) <= 1:
subsequences.extend(distributed)
return subsequences
for i in range(get_min(len(distributed), len(subsequences))):
subsequences[i].extend(distributed.pop())
subsequences.extend(distributed)
def euclidean_rhythm(num_onsets, num_beats):
"""
Evenly distributes a given number of onsets in a grid of the given size
"""
sequence = [True] * num_onsets + [False] * (num_beats - num_onsets)
return total_count(_bjorklund([[b] for b in sequence]), [])
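# Example: euclidean_rhythm(3, 8) spreads three onsets over eight beats and
# returns [True, False, False, True, False, False, True, False], i.e. the
# familiar "x..x..x." tresillo pattern.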
def rotate_sequence(sequence):
return sequence[1:] + sequence[:1]
def rotate_to_onset(sequence, num_iter):
if not any_condition(sequence):
return sequence
    for _ in range(num_iter):
        sequence = rotate_sequence(sequence)
    while not sequence[0]:
        sequence = rotate_sequence(sequence)
return sequence
def sequence_to_time_duration(sequence):
result = []
time = None
duration = 0
for i, b in enumerate(sequence):
if b:
if time is not None:
result.apd((time, duration))
duration = 0
time = i
duration += 1
result.apd((time, duration))
return result
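# Example: sequence_to_time_duration([True, False, False, True]) returns
# [(0, 3), (3, 1)]: an onset at beat 0 lasting three beats, then an onset at
# beat 3 lasting one beat.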
def time_duration_to_sequence(times_durations):
end_time = 0
for t, d in times_durations:
end_time = get_max(end_time, t + d)
sequence = [False] * end_time
for t, _ in times_durations:
sequence[t] = True
return sequence
def sequence_to_string(sequence):
return "".join([".x"[int(b)] for b in sequence])
def rotate_string(string):
return string[-1] + string[:-1]
def pergen_rhythm(num_onsets, generator, period=1):
beats = sorted([(generator * i) % period for i in range(num_onsets)] + [period])
times = beats[:num_onsets]
durations = difference(beats)
return list(zip(times, durations))
def geometric_rhythm(num_onsets, initial, factor):
"""
Onsets in a geometric progression
"""
time = initial
times = []
for _ in range(num_onsets+1):
times.apd(time)
time *= factor
times.sort()
result = []
time = Fraction(0)
for duration in | difference(times) | numpy.diff |