repo (stringlengths 2-91) | file (stringlengths 14-211) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-1.36M) | extension_type (stringclasses 1 value) |
---|---|---|---|---|---|---|
pyterpol | pyterpol-master/pyterpol_examples/observed_spectra_fitting/v746cas_2/ondrejov/mask_tell.py | import sys
import numpy as np
tellbase = [
[6522., 6525.5],
[6530., 6538.5],
[6541.8, 6550.37],
[6551.75, 6554.9],
[6557., 6560],
[6563.6, 6564.8],
[6568.38, 6576.3],
[6580.2, 6588.2],
[6594.2, 6596.],
[6598.8, 6603.4]
]
def remove_telluric(f):
"""
Removes intervals defined in tellbase
:param f:
:return:
"""
# load the data
w,i = np.loadtxt(f, unpack=True, usecols=[0,1])
# remove the wavelength intervals one by one - keep only points outside each interval
for lim in tellbase:
ind = np.where((w <= lim[0]) | (w >= lim[1]))[0]
w = w[ind]
i = i[ind]
np.savetxt(f, np.column_stack([w,i]), fmt='%12.6f')
def main():
f = sys.argv[1]
remove_telluric(f)
if __name__ == '__main__':
main()
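# Usage sketch (illustrative note, not part of the original script; the file
# name below is a placeholder): the script is meant to be run from the command
# line and overwrites the input spectrum in place, e.g.
#     python mask_tell.py spectrum.asc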
| 770 | 15.404255 | 56 | py |
pyterpol | pyterpol-master/pyterpol_examples/ObservedList/example.py | """
This example demonstrates how to prepare observations.
"""
import pyterpol
################################## BARE MINIMUM #########################################
# create a blank list
ol = pyterpol.ObservedList()
# now we are ready to attach some data - lets have a look at the data first
# the spectrum is not a KOREL spectrum, so we do not have to pass additional
# information
obs1 = pyterpol.ObservedSpectrum(filename='o.asc')
# lets plot the spectrum
obs1.plot(figname='observed1.png', savefig=True)
# Lets pretend that the second spectrum is a KOREL spectrum,
# because it is not really important, what is what now.
obs2 = pyterpol.ObservedSpectrum(filename='o2.asc', component='primary', korel=True)
obs2.plot(figname='observed2.png', savefig=True)
# Now attach the spectra to the ObservedList one by one
ol.add_one_observation(obs=obs1)
ol.add_one_observation(obs=obs2)
# review the class
print ol
# the name suggests that all the spectra can be attached at once
# lets clear the list first
ol.clear_all()
# add them all at once
ol.add_observations([obs1, obs2])
# review
print ol
# we saw that pyterpol complains a lot about not having the errors
# of the spectra. We also saw that no groups were assigned. That is
# because the default groups are set only by the Interface class.
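# Aside - an illustrative sketch, not part of the original example: when the
# uncertainty is unknown, it can be estimated from a continuum window with
# ObservedSpectrum.get_sigma_from_continuum (used in other pyterpol examples);
# the wavelength limits below are placeholders and must lie within the spectrum.
# obs1.get_sigma_from_continuum(4450., 4460.)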
# lets clear the list again
ol.clear_all()
# It is not necessary to wrap the observations into
# the class ObservedSpectrum. ObservedList does that
# for us. We only have to pass the parameters. Also lets
# pass some errors, and some groups
ol.add_one_observation(filename='o.asc', error=0.01, group=dict(rv=1))
ol.add_one_observation(filename='o2.asc', error=0.01, group=dict(rv=2), component='primary', korel=True)
# We can see that groups were set. In this configuration a separate set of radial
# velocities would be fitted for each spectrum. Such configuration is desirable
# if we work with different observed spectra.
print ol
# lets clear the class for the last time
ol.clear_all()
# If our spectra were different regions from one long spectrum,
# we may want to have the same velocity for each spectrum. Lets
# add the observations as a list of dictionaries
obs = [
dict(filename='o.asc', error=0.01, group=dict(rv=1)),
dict(filename='o2.asc', error=0.01, group=dict(rv=1), component='primary', korel=True)
]
ol.add_observations(obs)
# in this configuration there will be only one velocity
# for the two spectra. It has to be stressed that although
# two components may share the same group for a parameter,
# THE SAME PARAMETER WILL NEVER BE FITTED FOR BOTH. EACH COMPONENT
# ALWAYS GETS ITS OWN PARAMETER FOR EACH GROUP.
print ol
############################### END OF THE SAFE ZONE ####################################
# Here will be a demonstration of additional methods, which are not needed for
# usage of the class. It may potentially be dangerous to use them if
# your ObservedList has already been added to an Interface. | 2,934 | 34.361446 | 104 | py |
pyterpol | pyterpol-master/pyterpol_examples/StarList/example.py | """
This script demonstrates capabilities of the StarList class.
"""
import pyterpol
################################## BARE MINIMUM #########################################
# create an empty class
sl = pyterpol.StarList()
# pyterpol knows a set of parameters, which are given to a component
# these parameters are teff, logg, z, lr, vrot and rv. Therefore
# adding a component is possible by just calling:
sl.add_component()
# review
print sl
# in general it is better to name the component, because
# then it is easier to identify what belongs to what:
sl.add_component('secondary')
# review
print sl
# and the best option is to pass values of all the parameters
sl.add_component('tertiary', teff=15000., logg=4.0, vrot=10., rv=-20., lr=0.1, z=1.6)
# review
print sl
# what if we do not want the component to have some parameters?
# just pass None for the parameter and forbid pyterpol
# from using the defaults
sl.add_component('quaternary', teff=None, logg=None, vrot=10., rv=-20., lr=0.1, z=None, use_defaults=False)
# In the rare case when we want to define a parameter
# that is not listed among the default ones, we can
# add it using the method add_parameter_to_component.
# First one must pass the name of the component to which
# we add the parameter (see why it is better to set your
# component names :-) and after that just pass the attributes
# of the parameter.
sl.add_parameter_to_component(component='secondary', name='Stupid_parameter',
value=6, unit='half_a_dodo', fitted=False,)
# Nevertheless, if your grid is given by parameters not
# listed among the default ones, we encourage you
# to add them to the default ones.
# review
print sl
# What if we want to see a list of all defined physical parameters
print sl.get_physical_parameters()
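# Aside - an illustrative sketch, not part of the original example: component
# definitions can be kept in a plain dictionary and added in a loop; only the
# add_component call shown above is used, and the name/values below are made up.
more_components = {'quinary': dict(teff=9000., logg=4.2, vrot=50., rv=0., lr=0.3, z=1.0)}
for name, pars in more_components.items():
    sl.add_component(name, **pars)
print sl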
############################### END OF THE SAFE ZONE ####################################
# Here will be a demonstration of additional methods, which are not needed for
# usage of the class. It may potentially be dangerous to use them if
# your StarList has already been added to an Interface.
# TODO
| 2,079 | 31 | 107 | py |
pyterpol | pyterpol-master/pyterpol_examples/disentangled_spectra_fitting/hd81357/hd81357_plot.py | """
This shows how to evaluate the outcome of the fitting
We were fitting the disentangled spectra of the secondary.
"""
import pyterpol
# 1) Load the last session - create an empty Interface
itf = pyterpol.Interface()
# fill it with the last session
itf.load('hd81357.sav')
# 2) Have a look at the comparisons
itf.plot_all_comparisons(figname='finalfit')
# 3) Export the disentangled spectra
itf.write_synthetic_spectra(outputname='final_spectra')
# 4) Have a look how everything converged
itf.plot_convergence(figname='covergence_hd81357.png')
# 5) Have a look at the uncertainty of the fit
itf.plot_covariances(nbin=20)
| 627 | 20.655172 | 58 | py |
pyterpol | pyterpol-master/pyterpol_examples/disentangled_spectra_fitting/hd81357/hd81357.py | """
Real life demonstration. HD81357 is an interacting binary.
Its secondary is a Roche-lobe filling star, which is probably
losing its mass. We obtained disentangled spectra of the secondary
in two spectral regions. Here is an estimate of its radiative properties.
"""
# import numpy as np
import pyterpol
## 1) Create RegionList. This step is mandatory.
rl = pyterpol.RegionList()
# Add some regions - two in our case
rl.add_region(wmin=6324, wmax=6424, groups=dict(lr=0))
rl.add_region(wmin=4380, wmax=4497, groups=dict(lr=1))
# 2) Create ObservationList
ol = pyterpol.ObservedList()
# attach the disentangled spectra - in case of KOREL data it is mandatory to specify
# to which component the spectrum belongs and flag it as a KOREL spectrum
obs = [
dict(filename='DE_blue02_n.dat', component='secondary', korel=True, error=0.01),
dict(filename='DE_red02_n.dat', component='secondary', korel=True, error=0.01)
]
ol.add_observations(obs)
# 3) Create StarList
sl = pyterpol.StarList()
# add components
# values of the parameters can be passed when the component is added
sl.add_component(component='secondary', teff=4200., logg=1.70, vrot=20., lr=0.2)
# 4) Create the Interface
# Interface serves as a wrapper to the three different Lists, the fitting environment
# and the synthetic grids. The fitting environment can be added later
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
# define grid properties - we want the program to use the
# cubic spline.
itf.set_grid_properties(order=4)
# Once you set up the Interface, you should not change any List
# using anything other than methods defined in the Interface.
# Doing so may lead to unpredictable consequences.
itf.setup()
# 5) set the parameters - setting up boundaries is very important; it not only
# speeds up the computation, but also prevents the code from running out of
# the grid
itf.set_parameter(component='secondary', parname='teff', fitted=True, vmin=4005., vmax=6000.)
itf.set_parameter(component='secondary', parname='vrot', fitted=True, vmin=10., vmax=30.)
itf.set_parameter(component='secondary', parname='lr', fitted=True, vmin=0.02, vmax=0.4)
itf.set_parameter(component='secondary', parname='lr', group=1, fitted=True, value=0.10, vmin=0.05, vmax=0.4)
itf.set_parameter(component='secondary', parname='rv', fitted=True, vmin=-20.0, vmax=20.0)
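# Optional sanity check (an aside, not part of the original script): list the
# values of the parameters that were just flagged as fitted, using the same
# get_fitted_parameters(attribute='value') call that appears further below.
print itf.get_fitted_parameters(attribute='value')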
# 6) choose a fitting environment - in this case it is nelder mead and
# the tolerated relative change of chi^2
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-6)
# check that everything is set as intended - we have
# two relative luminosities and two radial velocities
# and that's what we wanted
print itf
"""
==============================================StarList==============================================
Component: secondary
name: rv value: 0.0 vmin: -20.0 vmax: 20.0 fitted: True group: 1 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -20.0 vmax: 20.0 fitted: True group: 2 _typedef: <type 'float'>
name: teff value: 4200.0 vmin: 4005.0 vmax: 6000.0 fitted: True group: 0 _typedef: <type 'float'>
name: vrot value: 20.0 vmin: 10.0 vmax: 30.0 fitted: True group: 0 _typedef: <type 'float'>
name: logg value: 1.7 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.2 vmin: 0.02 vmax: 0.4 fitted: True group: 0 _typedef: <type 'float'>
name: lr value: 0.1 vmin: 0.05 vmax: 0.4 fitted: True group: 1 _typedef: <type 'float'>
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: <type 'float'>
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6324, 6424):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (4380, 4497):
component: all groups: {'lr': 1}
============================================ObservedList============================================
List of all attached spectra:
filename: DE_blue02_n.dat component: secondary korel: True loaded: True hasErrors: True global_error: 0.01 group: {'rv': [2]} (min, max): (4377.0, 4500.0)
filename: DE_red02_n.dat component: secondary korel: True loaded: True hasErrors: True global_error: 0.01 group: {'rv': [1]} (min, max): (6321.0, 6426.96)
===============================================Fitter===============================================
Fitter: nlopt_nelder_mead optional_arguments: {'ftol': 1e-06}
Initial parameters:(lr, g.): (0.2, 0); (lr, g.): (0.1, 1); (rv, g.): (0.0, 1); (rv, g.): (0.0, 2); (teff, g.): (4200.0, 0);
(vrot, g.): (20.0, 0);
====================================================================================================
"""
# get the initial chi-square
init_pars = itf.get_fitted_parameters(attribute='value')
init_chi2 = itf.compute_chi2(init_pars)
print "The initial chi-square: %f" % (init_chi2)
"""
The initial chi-square: 20739.073943
"""
# 7) run fitting
itf.run_fit()
# get the final chi-square
final_pars = itf.get_fitted_parameters(attribute='value')
final_chi2 = itf.compute_chi2(final_pars)
print "The final chi-square: %f" % (final_chi2)
"""
The final chi-square: 5110.224473
"""
# write the fit result
itf.write_fitted_parameters(outputname='result.dat')
# 8) when the fit is done, save the file
itf.save('hd81357.sav')
# 9) Have a look at the comparisons
itf.plot_all_comparisons(figname='finalfit')
# 10) Export the disentangled spectra
itf.write_synthetic_spectra(outputname='final_spectra')
# 11) Have a look how everything converged
itf.plot_convergence(figname='covergence_hd81357.png')
# 12) Have a look at the uncertainty of the fit
itf.plot_covariances(nbin=20, parameters=['lr', 'teff', 'vrot', 'logg'])
| 5,610 | 39.366906 | 154 | py |
pyterpol | pyterpol-master/pyterpol_examples/Fitter/example.py | """
This example demonstrates usage of the class Fitter.
"""
import pyterpol
import numpy as np
import matplotlib.pyplot as plt
import time
################################## BARE MINIMUM #########################################
# define a function that will be minimized
def func(x):
"""
Polynomial of order 4 (it has two minima - a shallower local one and the global one)
:param x: list containing a single item - the abscissa
:return: value of the polynomial at x
"""
x = x[0]
return 0.5*x**4 - 2*x**3 - 5*x**2 + 12*x - 2
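# Aside - an illustrative check, not part of the original example: the
# derivative of func, 2x^3 - 6x^2 - 10x + 12, has three real roots (roughly
# -1.8, 0.9 and 3.8), i.e. there are two separate minima. This is why a local
# method started at x = -5 can get stuck in the shallower minimum near x = -1.8.
print np.roots([2., -6., -10., 12.])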
# create an empty fitter
fitter = pyterpol.Fitter()
# fitter is designed to work with sets of Parameter types.
# so we create one.
par = pyterpol.Parameter(name='x', value=-5., vmin=-100., vmax=100., fitted=True)
# What kind of fitter will we choose... lets have a look at
# the available ones. Note that the optional arguments that
# control each fit are also listed. For details have
# a look at the homepage of each environment.
print fitter.list_fitters()
# nlopt_nelder_mead is always a good choice - note that
# this function expects a list of parameters, not a single
# parameter, so we have to wrap it in brackets. ftol
# means that the fitting will end once the relative change
# of the cost function is less than 1e-6.
fitter.choose_fitter('nlopt_nelder_mead', fitparams=[par], ftol=1e-6)
# check the fitter
print fitter
# we can run the fitting by calling the fitter
t0 = time.time()
fitter(func)
dt1 = time.time() - t0
# have a look at the minimum and the value at minimum
print "func(%s) = %s" % (fitter.result, func(fitter.result))
# lets plot the function to be sure that we are in the global minimum
x = np.linspace(-10, 10, 200)
plt.plot(x, [func([v]) for v in x], 'k-', label='func(x)')
plt.plot(fitter.result, func(fitter.result), 'ro')
plt.ylim(-100, 100)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.savefig('result_nm.png')
plt.close()
# We see that the minimizer failed to find the global minimum.
# That is not very unusual when there are several similar minima.
# Lets choose a more capable fitting environment
fitter.choose_fitter('sp_diff_evol', fitparams=[par])
# run the fitting
t0 = time.time()
fitter(func)
dt2 = time.time() - t0
# have a look at the minimum and the value at minimum
print "func(%s) = %s" % (fitter.result, func(fitter.result))
# lets plot the function to be sure that we are in the global minimum
x = np.linspace(-10, 10, 200)
plt.plot(x, [func([v]) for v in x], 'k-', label='func(x)')
plt.plot(fitter.result, func(fitter.result), 'ro')
plt.ylim(-100, 100)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.savefig('result_de.png')
# The simplex was faster, but converged only
# locally, whereas the differential evolution
# converged correctly at the cost of ten times
# longer computation time
print "T(simplex) = %s" % str(dt1)
print "T(differential_evolution) = %s" % str(dt2)
| 2,764 | 28.105263 | 89 | py |
pyterpol | pyterpol-master/pyterpol_examples/RegionList/example.py | """
This script demonstrates capabilities of the RegionList class.
"""
import pyterpol
################################## BARE MINIMUM #########################################
# create an empty class
rl = pyterpol.RegionList()
# add a region - the simplest way
rl.add_region(wmin=4300, wmax=4500)
# add a region, define name
rl.add_region(wmin=6200, wmax=6600, identification='red')
# for some reason we may want to fit a region only for
# one component. Then we have to specify it. Of course
# the component has to be among those defined in StarList
# to work well later.
rl.add_region(wmin=7600, wmax=7800, identification='nir', component='primary')
# We can now check that every region was assigned a different relative
# luminosity group lr
print rl
# What if we want to fit the same relative luminosity for two regions?
# When setting groups manually you have to be careful not to assign
# a group that is the same as one of those created automatically.
# Automatic groups are numbered 1, 2, 3, ... - unless one has been defined by the user.
rl.add_region(wmin=6340, wmax=6350, identification='SiA', groups=dict(lr=100))
rl.add_region(wmin=6365, wmax=6375, identification='SiB', groups=dict(lr=100))
# Now there will be only one lr group for silicon lines
print rl
# if the user wants to get a list of defined regions
print rl.get_registered_regions()
# or a list of wavelength limits
print rl.get_wavelengths()
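# Aside - an illustrative sketch, not part of the original example: both queries
# return plain Python lists, so they can be used programmatically; nothing beyond
# the two methods shown above is assumed here.
print "number of defined regions:", len(rl.get_registered_regions())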
############################### END OF THE SAFE ZONE ####################################
# Here will be a demonstration of additional methods, which are not needed for
# usage of the class. It may potentially be dangerous to use them if
# your RegionList has already been added to an Interface.
# We may just want to create a comparison of observed and synthetic
# spectra and we may be very lazy. Then it is possible to read the
# regions from synthetic data.
# TODO | 1,879 | 35.153846 | 89 | py |
pyterpol | pyterpol-master/docs/conf.py | import os
# -*- coding: utf-8 -*-
#
# Pyterpol documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 26 12:34:08 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pyterpol'
copyright = u'2016, Nemravova Jana'
author = u'Nemravova Jana'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Pyterpol v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pyterpoldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Pyterpol.tex', u'Pyterpol Documentation',
u'Nemravova Jana', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyterpol', u'Pyterpol Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Pyterpol', u'Pyterpol Documentation',
author, 'Pyterpol', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# add the absolute path of the package root to sys.path so that autodoc can import pyterpol
import sys
sys.path.insert(0, os.path.abspath('../'))
| 10,169 | 27.647887 | 80 | py |
pyterpol | pyterpol-master/grids_ABS/ready_phoenix.py | #!/usr/bin/env python
"""
ready_phoenix.py
Convert Phoenix synthetic spectra from FITS to DAT.
"""
__author__ = "Miroslav Broz ([email protected])"
__version__ = "Jun 23rd 2016"
import sys
import numpy as np
from scipy.interpolate import splrep, splev
from astropy.io import fits
from pyterpol.synthetic.auxiliary import instrumental_broadening
def read_wave(filename):
"""Read wavelength data"""
hdu = fits.open(filename)
wave = hdu[0].data
hdu.info()
print("")
print(repr(hdu[0].header))
print("")
print(wave)
print("")
hdu.close()
return wave
def fits2dat(filename, wave):
"""Convert Phoenix synthetic spectrum from FITS to DAT."""
hdu = fits.open(filename)
intens = hdu[0].data
hdu.info()
print("")
print(repr(hdu[0].header))
print("")
print(intens)
print("")
hdu.close()
# np.savetxt(filename[:-4]+'dat.0', np.column_stack([wave, intens]), fmt="%.6e %.10e") # dbg
# convolution (to 1 Angstrom)
step = 1.0
intens = instrumental_broadening(wave, intens, width=step)
s = np.array(zip(wave, intens))
print(intens)
print("")
print(s)
print("")
# spline interpolation
wmin = s[0,0]
wmax = s[-1,0]
wnew = np.arange(wmin, wmax, step)
tck = splrep(s[:,0], s[:,1])
s_new = splev(wnew, tck)
intens = s_new
# eliminate negatives!
for i in xrange(0,len(intens)):
if intens[i] < 0.0:
intens[i] = 0.0
intens[i] = intens[i]*1.e-8 # erg s^-1 cm^-2 cm^-1 -> erg s^-1 cm^-2 A^-1 (as in POLLUX)
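# Note (an aside, not part of the original script): the loop above is equivalent
# to the vectorized form  intens = np.maximum(intens, 0.0) * 1.e-8,
# i.e. clip negative fluxes to zero and convert the flux units; it is left here
# as a comment only, so the conversion is not applied twice.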
# save spectra
out = filename[:-4]+'vis.dat'
np.savetxt(out, np.column_stack([wnew, intens]), fmt="%.6e %.10e")
sys.exit(1) # dbg
def main():
"""Convert all files"""
if len(sys.argv) > 1:
inlist = sys.argv[1:]
else:
inlist = np.loadtxt("inlist", dtype=str)
wave = read_wave("WAVE_PHOENIX-ACES-AGSS-COND-2011.fits")
for filename in inlist:
fits2dat(filename, wave)
if __name__ == "__main__":
main()
| 2,065 | 20.747368 | 97 | py |
pyterpol | pyterpol-master/pyterpol_test/test_SyntheticSpectrum/compare_c_p.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
from pyterpol.synthetic.makespectrum import SyntheticSpectrum
gridSpec = 'grid.dat'
# basic gridspectrum used by interpol
c0_wave, c0_int = np.loadtxt(gridSpec, unpack=True, usecols=[0,1])
# shifted and rotated gridspectrum
crs_wave, crs_int = np.loadtxt('output012', unpack=True, usecols=[0,1])
syspe = SyntheticSpectrum(f=gridSpec)
py0_wave, py0_int = syspe.get_spectrum()
pyrs_wave, pyrs_int = syspe.get_spectrum(rv=30, vrot=50)
# get padded spectrum
wave, intens = syspe.get_spectrum()
p_wave, p_intens = syspe.pad_continuum(wave, intens, 10)
plt.subplot(211)
plt.plot(c0_wave, c0_int, 'k-', lw=1)
plt.plot(crs_wave, crs_int, 'k-', lw=2)
plt.plot(py0_wave, py0_int+0.5, 'r-', lw=1)
plt.plot(pyrs_wave, pyrs_int+0.5, 'r-', lw=2)
plt.plot(p_wave, p_intens+0.5, 'b-')
plt.subplot(212)
plt.plot(c0_wave, c0_int-py0_int, 'y-', lw=1)
plt.plot(c0_wave, crs_int-pyrs_int+0.1, 'y-', lw=2)
plt.show() | 992 | 27.371429 | 71 | py |
pyterpol | pyterpol-master/pyterpol_test/test_SyntheticSpectrum/test01.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
from pyterpol.synthetic.makespectrum import SyntheticSpectrum
syspe = SyntheticSpectrum(f='temp.dat', teff=10000, logg=4.2)
# check normal output
print syspe.get_spectrum()
# choose a wave region
# this one should raise warning
wave = np.linspace(6500, 6600, 800)
base_wave, base_intens = syspe.get_spectrum(wave)
# get a shifted spectrum
wave = np.linspace(6500, 6600, 800)
rv_wave, rv_intens = syspe.get_spectrum(wave, rv=50)
np.savetxt('shifted.dat', np.column_stack([rv_wave, rv_intens]), fmt="%20.10e")
# get rotated spectrum
wave = np.linspace(6500, 6600, 800)
rot_wave, rot_intens = syspe.get_spectrum(wave, vrot=10)
print rot_wave, rot_intens
np.savetxt('rotated.dat', np.column_stack([rot_wave, rot_intens]), fmt="%20.10e")
plt.plot(base_wave, base_intens, 'k-')
plt.plot(rv_wave, rv_intens, 'r-')
plt.show() | 913 | 28.483871 | 79 | py |
pyterpol | pyterpol-master/pyterpol_test/test_comparison/test02/test02.py | """
Try to do some comparisons with various systems.
"""
import pyterpol
import matplotlib.pyplot as plt
# ================================= Binary =================================
debug=False
# 1) define some observations
ol = pyterpol.ObservedList(debug=debug)
obs = [
dict(filename='o.asc', error=0.01),
dict(filename='o2.asc', error=0.01)
]
ol.add_observations(obs)
# 2) define fitted regions
rl = pyterpol.RegionList(debug=debug)
rl.add_region(wmin=4300, wmax=4380)
rl.add_region(wmin=4460, wmax=4500)
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=-30., vrot=150., lr=0.5, z=1.0)
sl.add_component(component='secondary', teff=20000., logg=4.5, rv=30., vrot=10., lr=0.5, z=1.0)
# 4) define interface
itf = pyterpol.Interface(ol=ol, rl=rl, sl=sl, debug=True)
itf.setup()
itf.populate_comparisons()
itf.plot_all_comparisons()
print itf
| 939 | 25.857143 | 95 | py |
pyterpol | pyterpol-master/pyterpol_test/test_comparison/test02/test01.py | """
Try to do some comparisons with various systems.
"""
import pyterpol
import matplotlib.pyplot as plt
# ============================== Single star ===============================
debug=False
# 1) define some observations
ol = pyterpol.ObservedList(debug=debug)
obs = [
dict(filename='o.asc', error=0.01),
dict(filename='o2.asc', error=0.01)
]
ol.add_observations(obs)
# 2) define fitted regions
rl = pyterpol.RegionList(debug=debug)
rl.add_region(wmin=4300, wmax=4380)
rl.add_region(wmin=4460, wmax=4500)
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=-30., vrot=150., lr=0.5, z=1.0)
# 4) define interface
itf = pyterpol.Interface(ol=ol, rl=rl, sl=sl, debug=True)
itf.setup()
print itf
# ================================= Binary =================================
debug=False
# 1) define some observations
ol = pyterpol.ObservedList(debug=debug)
obs = [
dict(filename='o.asc', error=0.01),
dict(filename='o2.asc', error=0.01)
]
ol.add_observations(obs)
# 2) define fitted regions
rl = pyterpol.RegionList(debug=debug)
rl.add_region(wmin=4300, wmax=4380)
rl.add_region(wmin=4460, wmax=4500)
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=-30., vrot=150., lr=0.5, z=1.0)
sl.add_component(component='secondary', teff=20000., logg=4.5, rv=30., vrot=10., lr=0.5, z=1.0)
# 4) define interface
itf = pyterpol.Interface(ol=ol, rl=rl, sl=sl, debug=True)
itf.setup()
print itf
| 1,567 | 26.034483 | 95 | py |
pyterpol | pyterpol-master/pyterpol_test/test_comparison/testkorel/testkorel.py | """
Make a comparison with some data created with the old version.
KOREL format of the output data is used.
"""
import pyterpol
# 1) setup the model
sl = pyterpol.StarList()
sl.add_component('primary', teff=11257., logg=4.43, vrot=28.8, lr=0.744, rv=-17.94, z=1.000)
sl.add_component('secondary', teff=7714., logg=4.25, vrot=26.42, lr=0.256, rv=-16.73, z=1.000)
# setup the data
obs = [
dict(filename='output000', korel=True, component='primary',),
dict(filename='output001', korel=True, component='secondary')
]
ol = pyterpol.ObservedList()
ol.add_observations(obs)
# create interface
itf = pyterpol.Interface(ol=ol, sl=sl, debug=True)
itf.setup()
# populate the comparisons
itf.populate_comparisons()
# plot the comparisons
itf.plot_all_comparisons() | 765 | 25.413793 | 94 | py |
pyterpol | pyterpol-master/pyterpol_test/test_comparison/testnormal/testnormal.py | """
Make a comparison with some data created with the old version.
KOREL format of the output data is used.
"""
import pyterpol
# 1) setup the model
sl = pyterpol.StarList()
sl.add_component('primary', teff=32554., logg=3.77, vrot=64.5, lr=0.449, rv=6.685, z=1.003)
sl.add_component('secondary', teff=31205., logg=3.36, vrot=213.0, lr=0.551, rv=6.685, z=1.003)
# 2) setup regions - we skip this, since we only want to compare
# rl = pyterpol.RegionList(wmin=4810, wmax=5090)
# 3) setup observed data
ol = pyterpol.ObservedList()
obs = [
dict(filename='d', error=0.01)
]
ol.add_observations(obs)
# get the interface
itf = pyterpol.Interface(ol=ol, sl=sl)
# set grid properties
itf.set_grid_properties(order=2)
# set up the grid
itf.setup()
# do the comparisons
itf.populate_comparisons()
itf.plot_all_comparisons()
| 818 | 22.4 | 94 | py |
pyterpol | pyterpol-master/pyterpol_test/test_comparison/test03/test03.py | """
Try to do some comparisons with various systems.
"""
import pyterpol
import matplotlib.pyplot as plt
# ======================== Binary wih KOREL file ===========================
debug=False
# 1) define some observations
ol = pyterpol.ObservedList(debug=debug)
obs = [
dict(filename='o.asc', error=0.01, korel=True, component='primary', group={'rv':0}),
dict(filename='o2.asc', error=0.01, korel=True, component='secondary', group={'rv':1})
]
ol.add_observations(obs)
# 2) define fitted regions
rl = pyterpol.RegionList(debug=debug)
rl.add_region(wmin=4300, wmax=4380)
rl.add_region(wmin=4460, wmax=4500)
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=0., vrot=150., lr=0.5, z=1.0)
sl.add_component(component='secondary', teff=10000., logg=4.5, rv=0., vrot=10., lr=0.5, z=1.0)
# 4) define interface
itf = pyterpol.Interface(ol=ol, rl=rl, sl=sl, debug=debug)
itf.setup()
print itf
print itf.rel_rvgroup_region
print itf.list_comparisons()
itf.populate_comparisons()
itf.plot_all_comparisons()
| 1,096 | 27.868421 | 94 | py |
pyterpol | pyterpol-master/pyterpol_test/test_comparison/test01/test01.py | """
Try to do some comparisons with various systems.
"""
import pyterpol
import matplotlib.pyplot as plt
# ============================== Single star ===============================
debug=False
# 1) define some observations
ol = pyterpol.ObservedList(debug=debug)
obs = [
dict(filename='o.asc', error=0.01),
dict(filename='o2.asc', error=0.01)
]
ol.add_observations(obs)
# 2) define fitted regions
rl = pyterpol.RegionList(debug=debug)
rl.add_region(wmin=4300, wmax=4380)
rl.add_region(wmin=4460, wmax=4500)
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=-30., vrot=150., lr=1.0, z=1.0)
# 4) define interface
itf = pyterpol.Interface(ol=ol, rl=rl, sl=sl, debug=True)
itf.setup()
itf.populate_comparisons()
itf.plot_all_comparisons()
| 832 | 24.242424 | 95 | py |
pyterpol | pyterpol-master/pyterpol_test/test_fitting/test_fitting/test04.py | """
Test computation of chi2 - fitting of 6 RVs on three spectra.
This also shows that simplex wont get out of the local minimum easily,
so more powerful fitting environments are needed if we want
to get over big obstacles.
"""
import pyterpol
rl = pyterpol.RegionList()
rl.add_region(wmin=6325, wmax=6375)
rl.add_region(wmin=6540, wmax=6600)
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=17000., logg=4.0, rv=-100.0, z=1.0, vrot=40.0, lr=0.35)
sl.add_component(component='secondary', teff=26000., logg=4.0, rv=100.0, z=1.0, vrot=140.0, lr=0.65)
obs = [
dict(filename='a', error=0.001),
dict(filename='b', error=0.001),
dict(filename='c', error=0.001)
]
ol = pyterpol.ObservedList()
ol.add_observations(obs)
# setup the class
itf = pyterpol.Interface(sl=sl, ol=ol, rl=rl, debug=False, spectrum_by_spectrum=['rv'])
itf.set_grid_properties(order=2)
itf.setup()
print itf
print itf.list_comparisons()
# setup fitted parameters
# itf.set_parameter(parname='logg', component='secondary', fitted=True, vmin=3.5, vmax=4.5)
# itf.set_parameter(parname='teff', component='secondary', fitted=True, vmin=20000., vmax=28000.)
itf.set_parameter(parname='vrot', component='secondary', fitted=True, vmin=100., vmax=170.)
itf.set_parameter(parname='rv', component='secondary', fitted=True, vmin=-100., vmax=100.)
# itf.set_parameter(parname='lr', component='secondary', fitted=True, vmin=0.5, vmax=0.8)
# itf.set_parameter(parname='logg', component='primary', fitted=True, vmin=3.5, vmax=4.5)
# itf.set_parameter(parname='teff', component='primary', fitted=True, vmin=15000., vmax=20000.)
itf.set_parameter(parname='vrot', component='primary', fitted=True, vmin=40., vmax=80.)
itf.set_parameter(parname='rv', component='primary', fitted=True, vmin=-100., vmax=100.)
# itf.set_parameter(parname='lr', component='primary', fitted=True, vmin=0.2, vmax=0.5)
#
fitpars = itf.get_fitted_parameters()
#
# # # choose a fitter
itf.choose_fitter('nlopt_nelder_mead', fitparams=fitpars, ftol=1e-4)
print itf
# # first of all reduce the comparison list
l = itf.get_comparisons()
#
# # have a look at the chi^2
init_pars = pyterpol.parlist_to_list(fitpars)
init_chi2 = itf.compute_chi2(init_pars, l=l)
print "Initial settings:", init_pars, init_chi2
#
# # plot initial comparison
itf.plot_all_comparisons(l=l, figname='initial')
#
# # # do the fitting
itf.run_fit(l=l)
# #
# # # evaluate final parameters
final_pars = pyterpol.parlist_to_list(itf.get_fitted_parameters())
final_chi2 = itf.compute_chi2(final_pars, l=l)
print "Final settings:", final_pars, final_chi2
# # #
# # # 3 plot initial comparison
itf.plot_all_comparisons(l=l, figname='final_spectra')
itf.accept_fit()
| 2,697 | 32.725 | 100 | py |
pyterpol | pyterpol-master/pyterpol_test/test_fitting/test_fitting/test05.py | """
Test computation of chi2 - fitting of 6 RVs on three spectra.
This also shows that simplex won't get out of a local minimum easily,
so more powerful fitting environments are needed if we want
to get over big obstacles.
"""
import pyterpol
rl = pyterpol.RegionList()
rl.add_region(wmin=6325, wmax=6375)
rl.add_region(wmin=6540, wmax=6600)
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=17000., logg=4.0, rv=-100.0, z=1.0, vrot=40.0, lr=0.35)
sl.add_component(component='secondary', teff=26000., logg=4.0, rv=100.0, z=1.0, vrot=140.0, lr=0.65)
obs = [
dict(filename='a', error=0.001, group=dict(rv=1)),
dict(filename='b', error=0.001, group=dict(rv=2)),
dict(filename='c', error=0.001, group=dict(rv=3))
]
ol = pyterpol.ObservedList()
ol.add_observations(obs)
# setup the class
itf = pyterpol.Interface(sl=sl, ol=ol, rl=rl, debug=False, spectrum_by_spectrum=['rv'])
itf.set_grid_properties(order=2)
itf.setup()
print itf
print itf.list_comparisons()
# setup fitted parameters
itf.set_parameter(parname='logg', component='secondary', fitted=True, vmin=3.5, vmax=4.5)
itf.set_parameter(parname='teff', component='secondary', fitted=True, vmin=20000., vmax=28000.)
itf.set_parameter(parname='vrot', component='secondary', fitted=True, vmin=100., vmax=170.)
itf.set_parameter(parname='rv', component='secondary', fitted=True, vmin=-100., vmax=100.)
itf.set_parameter(parname='lr', component='secondary', fitted=True, vmin=0.5, vmax=0.8)
itf.set_parameter(parname='logg', component='primary', fitted=True, vmin=3.5, vmax=4.5)
itf.set_parameter(parname='teff', component='primary', fitted=True, vmin=15000., vmax=20000.)
itf.set_parameter(parname='vrot', component='primary', fitted=True, vmin=40., vmax=80.)
itf.set_parameter(parname='rv', component='primary', fitted=True, vmin=-120., vmax=120.)
itf.set_parameter(parname='lr', component='primary', fitted=True, vmin=0.2, vmax=0.5)
fitpars = itf.get_fitted_parameters()
#
# # # choose a fitter
itf.choose_fitter('nlopt_nelder_mead', fitparams=fitpars, ftol=1e-4)
print itf
# # first of all reduce the comparison list
l = itf.get_comparisons()
#
# # have a look at the chi^2
init_pars = pyterpol.parlist_to_list(fitpars)
init_chi2 = itf.compute_chi2(init_pars, l=l)
print "Initial settings:", init_pars, init_chi2
#
# # plot initial comparison
itf.plot_all_comparisons(l=l, figname='initial')
#
# # # do the fitting
itf.run_fit(l=l)
# #
# # # evaluate final parameters
final_pars = pyterpol.parlist_to_list(itf.get_fitted_parameters())
final_chi2 = itf.compute_chi2(final_pars, l=l)
print "Final settings:", final_pars, final_chi2
# # #
# # # 3 plot initial comparison
itf.plot_all_comparisons(l=l, figname='final_spectra')
itf.accept_fit()
| 2,739 | 32.82716 | 100 | py |
pyterpol | pyterpol-master/pyterpol_test/test_fitting/test_fitting/test02.py | """
Test computation of chi2 - fitting of 6 RVs on three spectra.
This also shows that simplex won't get out of a local minimum easily,
so more powerful fitting environments are needed if we want
to get over big obstacles.
"""
import pyterpol
rl = pyterpol.RegionList()
rl.add_region(wmin=6320, wmax=6380)
rl.add_region(wmin=6500, wmax=6600)
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=18000., logg=4.5, rv=10., z=1.0, vrot=50.0, lr=0.3)
sl.add_component(component='secondary', teff=25000., logg=4.5, rv=10., z=1.0, vrot=150.0, lr=0.7)
obs = [
dict(filename='a', error=0.001, group=dict(rv=1)),
dict(filename='b', error=0.001, group=dict(rv=2)),
dict(filename='c', error=0.001, group=dict(rv=3))
]
ol = pyterpol.ObservedList()
ol.add_observations(obs)
# setup the class
itf = pyterpol.Interface(sl=sl, ol=ol, rl=rl)
itf.setup()
# setup fitted parameters
itf.set_parameter(parname='rv', fitted=True)
fitpars = itf.get_fitted_parameters()
# choose a fitter
itf.choose_fitter('sp_nelder_mead', fitparams=fitpars)
# first of all reduce the comparison list
l = itf.get_comparisons()
# have a look at the chi-2
init_pars = pyterpol.parlist_to_list(fitpars)
init_chi2 = itf.compute_chi2(init_pars, l=l)
print "Initial settings:", init_pars, init_chi2
# plot initial comparison
itf.plot_all_comparisons(l=l, figname='initial_3_spectra')
# do the fitting
itf.run_fit(l=l)
# evaluate final parameters
final_pars = pyterpol.parlist_to_list(itf.get_fitted_parameters())
final_chi2 = itf.compute_chi2(final_pars, l=l)
print "Final settings:", final_pars, final_chi2
# plot initial comparison
itf.plot_all_comparisons(l=l, figname='final_3_spectra')
itf.accept_fit()
# since three parameters were a bit difficult
# repeat the fitting again
itf.run_fit(l=l)
itf.plot_all_comparisons(l=l, figname='second_final_3_spectra')
final_pars = pyterpol.parlist_to_list(itf.get_fitted_parameters())
final_chi2 = itf.compute_chi2(final_pars, l=l)
print "Second Final settings:", final_pars, final_chi2
| 2,036 | 26.527027 | 97 | py |
pyterpol | pyterpol-master/pyterpol_test/test_fitting/test_fitting/test01.py | """
Test computation of chi2 - fitting of one RV on one spectrum
"""
import pyterpol
rl = pyterpol.RegionList()
rl.add_region(wmin=6320, wmax=6380)
rl.add_region(wmin=6500, wmax=6600)
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=18000., logg=4.5, rv=10., z=1.0, vrot=50.0, lr=0.3)
sl.add_component(component='secondary', teff=25000., logg=4.5, rv=10., z=1.0, vrot=150.0, lr=0.7)
obs = [
dict(filename='a', error=0.001, group=dict(rv=1)),
dict(filename='b', error=0.001, group=dict(rv=2)),
dict(filename='c', error=0.001, group=dict(rv=3))
]
ol = pyterpol.ObservedList()
ol.add_observations(obs)
# setup the class
itf = pyterpol.Interface(sl=sl, ol=ol, rl=rl)
itf.setup()
# setup fitted parameters
itf.set_parameter(parname='rv', group=3, fitted=True)
fitpars = itf.get_fitted_parameters()
# choose a fitter
itf.choose_fitter('sp_nelder_mead', fitparams=fitpars)
# first of all reduce the comparison list
l = itf.get_comparisons(rv=3)
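# Note (descriptive aside, not part of the original test): get_comparisons(rv=3)
# keeps only the comparisons whose radial-velocity group is 3, i.e. here just
# the spectrum 'c', which was given group=dict(rv=3) above.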
# have a look at the chi-2
init_pars = pyterpol.parlist_to_list(fitpars)
init_chi2 = itf.compute_chi2(init_pars, l=l)
print "Initial settings:", init_pars, init_chi2
# plot initial comparison
itf.plot_all_comparisons(l=l, figname='initial')
# do the fitting
itf.run_fit(l=l)
# evaluate final parameters
final_pars = pyterpol.parlist_to_list(itf.get_fitted_parameters())
final_chi2 = itf.compute_chi2(final_pars, l=l)
print "Final settings:", final_pars, final_chi2
# plot initial comparison
itf.plot_all_comparisons(l=l, figname='final')
# print itf.fitter.iters
| 1,555 | 23.698413 | 97 | py |
pyterpol | pyterpol-master/pyterpol_test/test_fitting/test_fitting/test03.py | """
Test computation of chi2 - fitting of 6 RVs on three spectra.
This also shows that simplex won't get out of a local minimum easily,
so more powerful fitting environments are needed if we want
to get over big obstacles.
"""
import pyterpol
rl = pyterpol.RegionList()
rl.add_region(wmin=6325, wmax=6375, groups={'lr':0})
rl.add_region(wmin=6540, wmax=6600, groups={'lr':0})
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=17000., logg=4.0, rv=-100.0, z=1.0, vrot=40.0, lr=0.35)
sl.add_component(component='secondary', teff=26000., logg=4.0, rv=100.0, z=1.0, vrot=140.0, lr=0.65)
obs = [
dict(filename='a', error=0.001, group=dict(rv=1)),
dict(filename='b', error=0.001, group=dict(rv=2)),
dict(filename='c', error=0.001, group=dict(rv=3))
]
ol = pyterpol.ObservedList()
ol.add_observations(obs)
# setup the class
itf = pyterpol.Interface(sl=sl, ol=ol, rl=rl, debug=False)
itf.set_grid_properties(order=2)
itf.setup()
# setup fitted parameters
itf.set_parameter(parname='logg', component='secondary', fitted=True, vmin=3.5, vmax=4.5)
itf.set_parameter(parname='teff', component='secondary', fitted=True, vmin=20000., vmax=28000.)
itf.set_parameter(parname='vrot', component='secondary', fitted=True, vmin=100., vmax=170.)
itf.set_parameter(parname='lr', component='secondary', fitted=True, vmin=0.5, vmax=0.8)
itf.set_parameter(parname='logg', component='primary', fitted=True, vmin=3.5, vmax=4.5)
itf.set_parameter(parname='teff', component='primary', fitted=True, vmin=15000., vmax=20000.)
itf.set_parameter(parname='vrot', component='primary', fitted=True, vmin=40., vmax=80.)
itf.set_parameter(parname='lr', component='primary', fitted=True, vmin=0.2, vmax=0.5)
fitpars = itf.get_fitted_parameters()
# # choose a fitter
itf.choose_fitter('nlopt_nelder_mead', fitparams=fitpars, xtol=1e-4)
# print itf
# first of all reduce the comparison list
l = itf.get_comparisons(rv=3)
# have a look at the chi-2
print itf
print itf.list_comparisons(l=l)
init_pars = pyterpol.parlist_to_list(fitpars)
init_chi2 = itf.compute_chi2(init_pars, l=l)
print "Initial settings:", init_pars, init_chi2
# plot initial comparison
itf.plot_all_comparisons(l=l, figname='initial')
# # do the fitting
itf.run_fit(l=l)
# #
# # evaluate final parameters
final_pars = pyterpol.parlist_to_list(itf.get_fitted_parameters())
final_chi2 = itf.compute_chi2(final_pars, l=l)
print "Final settings:", final_pars, final_chi2
# #
# # 3 plot initial comparison
itf.plot_all_comparisons(l=l, figname='final_spectra')
itf.accept_fit()
| 2,551 | 31.717949 | 100 | py |
pyterpol | pyterpol-master/pyterpol_test/test_fitting/test_fitting/test06.py | """
Test computation of chi2 - fitting of 6 RVs on three spectra.
This also shows that simplex won't get out of a local minimum easily,
so more powerful fitting environments are needed if we want
to get over big obstacles.
"""
import pyterpol
rl = pyterpol.RegionList()
rl.add_region(wmin=6325, wmax=6375)
rl.add_region(wmin=6540, wmax=6600)
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=17000., logg=4.0, rv=-100.0, z=1.0, vrot=40.0, lr=0.35)
sl.add_component(component='secondary', teff=26000., logg=4.0, rv=100.0, z=1.0, vrot=140.0, lr=0.65)
obs = [
dict(filename='a', error=0.001),
dict(filename='b', error=0.001),
dict(filename='c', error=0.001)
]
ol = pyterpol.ObservedList()
ol.add_observations(obs)
# setup the class
itf = pyterpol.Interface(sl=sl, ol=ol, rl=rl, debug=False, spectrum_by_spectrum=['rv', 'vrot'])
itf.set_grid_properties(order=3)
itf.setup()
print itf
print itf.list_comparisons()
# setup fitted parameters
itf.set_parameter(parname='rv', vmin=-150., vmax=150.)
itf.set_parameter(parname='logg', component='secondary', fitted=True, vmin=3.5, vmax=4.5)
itf.set_parameter(parname='teff', component='secondary', fitted=True, vmin=20000., vmax=28000.)
# itf.set_parameter(parname='vrot', component='secondary', fitted=True, vmin=100., vmax=170.)
# itf.set_parameter(parname='rv', component='secondary', fitted=True, vmin=-100., vmax=100.)
itf.set_parameter(parname='lr', component='secondary', fitted=True, vmin=0.5, vmax=0.8)
itf.set_parameter(parname='logg', component='primary', fitted=True, vmin=3.5, vmax=4.5)
itf.set_parameter(parname='teff', component='primary', fitted=True, vmin=15000., vmax=20000.)
# itf.set_parameter(parname='vrot', component='primary', fitted=True, vmin=40., vmax=80.)
# itf.set_parameter(parname='rv', component='primary', fitted=True, vmin=-120., vmax=120.)
itf.set_parameter(parname='lr', component='primary', fitted=True, vmin=0.2, vmax=0.5)
fitpars = itf.get_fitted_parameters()
#
# # # choose a fitter
itf.choose_fitter('nlopt_nelder_mead', fitparams=fitpars, ftol=1e-4)
print itf
# # first of all reduce the comparison list
l = itf.get_comparisons()
#
# # have a look at the chi^2
init_pars = pyterpol.parlist_to_list(fitpars)
init_chi2 = itf.compute_chi2(init_pars, l=l)
print "Initial settings:", init_pars, init_chi2
#
# # plot initial comparison
itf.plot_all_comparisons(l=l, figname='initial')
#
# # # do the fitting
itf.run_iterative_fit(2,l=l)
# itf.optimize_spectrum_by_spectrum()
# #
# # # evaluate final parameters
final_pars = pyterpol.parlist_to_list(itf.get_fitted_parameters())
final_chi2 = itf.compute_chi2(final_pars, l=l)
print "Final settings:", final_pars, final_chi2
# # #
# # # 3 plot initial comparison
itf.plot_all_comparisons(l=l, figname='final_spectra')
itf.accept_fit()
| 2,805 | 33.219512 | 100 | py |
pyterpol | pyterpol-master/pyterpol_test/test_fitting/test_setup/test01.py | """
Test of setting up the class to start fitting.
"""
import pyterpol
rl = pyterpol.RegionList()
rl.add_region(wmin=5300, wmax=5500)
rl.add_region(wmin=6500, wmax=6600)
sl = pyterpol.StarList()
sl.add_component(teff=10000., logg=4.5, rv=10., z=1.0, vrot=20.0)
itf = pyterpol.Interface(sl=sl, rl=rl, debug=True)
print itf
itf.set_parameter(parname='teff', value=20000., vmin=15000., vmax=25000., fitted=True)
itf.set_parameter(parname='logg', value=3.5, vmin=3., vmax=4., fitted=True)
print itf
# have a look at the fitted parameters
parlist = itf.get_fitted_parameters()
print pyterpol.parlist_to_list(parlist)
# setup and plot
itf.setup()
itf.populate_comparisons()
itf.plot_all_comparisons()
reduced = itf.get_comparisons(rv=0)
itf.populate_comparisons(l=reduced)
| 775 | 23.25 | 86 | py |
pyterpol | pyterpol-master/pyterpol_test/test_fitting/test_chi2/test01.py | """
Test computation of chi2
"""
import pyterpol
rl = pyterpol.RegionList()
rl.add_region(wmin=6320, wmax=6380)
rl.add_region(wmin=6500, wmax=6600)
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=18000., logg=4.5, rv=10., z=1.0, vrot=50.0, lr=0.3)
sl.add_component(component='secondary', teff=25000., logg=4.5, rv=10., z=1.0, vrot=150.0, lr=0.7)
obs = [
dict(filename='a', error=0.001, group=dict(rv=1)),
dict(filename='b', error=0.001, group=dict(rv=2)),
dict(filename='c', error=0.001, group=dict(rv=3))
]
ol = pyterpol.ObservedList()
ol.add_observations(obs)
itf = pyterpol.Interface(sl=sl, ol=ol, rl=rl)
itf.setup()
# this reduces the list of observed spectra
reduced = itf.get_comparisons(rv=1)
# setup fitted parameters
itf.set_parameter(parname='rv', group='all', fitted=True)
itf.set_parameter(parname='rv', component='primary', group=3, value=-100.)
itf.set_parameter(parname='rv', component='secondary', group=3, value=100.)
print itf
# this computes the models and chi-square
itf.compute_chi2([
-100., 100.,
-100., 100.,
-100., 100.,
])
itf.plot_all_comparisons()
print itf.list_comparisons()
# print itf
| 1,191 | 22.84 | 97 | py |
pyterpol | pyterpol-master/pyterpol_test/test_hjd/test.hjd.py | import numpy as np
import pyterpol
def load_observations(f):
"""
:param f: file
:return:
"""
# load the observations
flist = np.loadtxt(f, usecols=[0], unpack=True, dtype=str)
hjd = np.loadtxt(f, usecols=[1], unpack=True).tolist()
hjd[0] = None
# create list of observations
obs = []
for i, sf in enumerate(flist):
# wrap the spectrum into observed spectrum class
# o = pyterpol.ObservedSpectrum(filename=sf, group=dict(rv=0))
# o = pyterpol.ObservedSpectrum(filename=sf, group=dict(rv=0), hjd=hjd[i])
o = pyterpol.ObservedSpectrum(filename=sf, group=dict(rv=i), hjd=hjd[i])
# estimate uncertainty from continuum
o.get_sigma_from_continuum(6620., 6630.)
obs.append(o)
# create ObservedList
ol = pyterpol.ObservedList()
ol.add_observations(obs)
return ol, flist, hjd
def main():
"""
:return:
"""
# parameters
niter = 2
# 1) Generate region
rl = pyterpol.RegionList()
rl.add_region(wmin=6337., wmax=6410.0, groups=dict(lr=0))
rl.add_region(wmin=6530., wmax=6600.0, groups=dict(lr=0))
rl.add_region(wmin=6660., wmax=6690.0, groups=dict(lr=0))
# 2) Load observed data
ol = load_observations('prekor.lst')[0]
## 3) Generate components
sl = pyterpol.StarList()
sl.add_component('primary', teff=16000., logg=4.285, lr=1.0, vrot=90., z=1.0)
## 4) construct the interface
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
itf.set_grid_properties(order=4, step=0.05)
itf.setup()
print itf
## 5) write rvs
itf.save('test.sav')
itf.write_rvs('test.rv.dat')
# 6) try to load it
itf.load('test.sav')
# 7) and save it again
itf.save('test2.sav')
if __name__ == '__main__':
main()
| 1,829 | 24.068493 | 87 | py |
pyterpol | pyterpol-master/pyterpol_test/test_fitter/test_save.py | import pyterpol
fitter = pyterpol.Fitter()
fitter.choose_fitter('nlopt_nelder_mead', xtol=1e-5, ftol=1e-5)
fitter.save('fitter_save.txt')
print fitter
fitter.clear_all()
print fitter
fitter.load('fitter_save.txt')
print fitter | 226 | 24.222222 | 63 | py |
pyterpol | pyterpol-master/pyterpol_test/test_Parameter/test01.py | """
Test of basic properties of the Parameter class.
"""
from pyterpol.fitting.parameter import Parameter
from pyterpol.fitting.parameter import parameter_definitions as pd
# test that all parameters defined in parameter_definitions result in a
# Parameter - passed
for key in pd.keys():
p = Parameter(**pd[key])
# lets try effective temperature
# Try to do some changes
p['value'] = 12000.
p['vmin'] = 8000.
p['vmax'] = 20000.
p['fitted'] = True
# try to do incorrect change
# p['value'] = 'dog' # correctly incorrect
| 532 | 18.035714 | 70 | py |
pyterpol | pyterpol-master/pyterpol_test/test_absolute_spectra/test.py | import pyterpol
import matplotlib.pyplot as plt
wmin = 3600
wmax = 4100
sygri = pyterpol.SyntheticGrid(flux_type='absolute')
params = dict(teff=9950, logg=3.7, z=1.0)
spec1 = sygri.get_synthetic_spectrum(params, [wmin, wmax], order=4, step=0.1)
params = dict(teff=10000, logg=3.5, z=1.0)
spec2 = sygri.get_synthetic_spectrum(params, [wmin, wmax], order=4, step=0.1)
params = dict(teff=10000, logg=4.0, z=1.0)
spec3 = sygri.get_synthetic_spectrum(params, [wmin, wmax], order=4, step=0.1)
ax = plt.subplot(111)
spec1.plot(ax=ax)
spec2.plot(ax=ax)
spec3.plot(ax=ax)
plt.show() | 575 | 31 | 77 | py |
pyterpol | pyterpol-master/pyterpol_test/test_SyntheticGrid/test_grid_listing.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
# create the grid
sygri = pyterpol.SyntheticGrid()
# read properties
sygri.read_list_from_file('gridlist', columns=['FILENAME', 'TEFF', 'LOGG'], family='BSTAR')
# test narrowing down the synthetic spectra list
for x in sygri.get_all(teff=20000):
print x
for x in sygri.get_all(teff=23000, logg=4.5):
print x
for x in sygri.get_all(teff=25000, logg=6.0):
print x
# checks narrowing down of the grid with > <
for x in sygri.narrow_down_grid(teff=20000):
print x
for x in sygri.narrow_down_grid(teff=25000, logg=(3.0,4.0)):
print x
for x in sygri.narrow_down_grid(teff=25000, logg=(6.0,7.0)):
print x
| 751 | 24.066667 | 91 | py |
pyterpol | pyterpol-master/pyterpol_test/test_SyntheticGrid/test_grid_creation.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
# create the grid
sygri = pyterpol.SyntheticGrid()
# this one works - checked
sygri.read_list_from_file('gridlist', columns=['FILENAME', 'TEFF', 'LOGG'], family='BSTAR')
print sygri
print sygri.get_available_values('TEFF')
print sygri.get_available_values('FAMILY')
print sygri.get_available_values('LOGG')
sygri.clear_all()
# this one should also work - checked
sygri.read_list_from_file('gridlist2', columns=['FILENAME', 'TEFF', 'LOGG', 'FAMILY'])
#print sygri.SyntheticSpectraList
print sygri.get_available_values('TEFF')
print sygri.get_available_values('FAMILY')
print sygri.get_available_values('LOGG')
sygri.clear_all()
# this one should raise warning - family is assigned automatically - checked
sygri.read_list_from_file('gridlist', columns=['FILENAME', 'TEFF', 'LOGG'])
#print sygri.SyntheticSpectraList
print sygri.get_available_values('TEFF')
print sygri.get_available_values('FAMILY')
print sygri.get_available_values('LOGG')
sygri.clear_all()
# this one should raise error - checked
sygri.read_list_from_file('gridlist', columns=['FILEAME', 'TEFF', 'LOGG'])
#print sygri.SyntheticSpectraList
print sygri.get_available_values('TEFF')
print sygri.get_available_values('FAMILY')
print sygri.get_available_values('LOGG')
sygri.clear_all() | 1,351 | 32.8 | 91 | py |
pyterpol | pyterpol-master/pyterpol_test/test_SyntheticGrid/test_grid_selection.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
# create the grid
sygri = pyterpol.SyntheticGrid()
# read properties
sygri.read_list_from_file('b1.dat', columns=['FILENAME', 'TEFF', 'LOGG', 'Z'], family='BSTAR')
sygri.read_list_from_file('p1.dat', columns=['FILENAME', 'TEFF', 'LOGG', 'Z'], family='POLLUX')
# check choosing algorithm
#print sygri.get_available_values('teff', logg=2.0, z=2.00)
# test narrowing down the synthetic spectra list
values = sygri.select_parameters(logg=4.0, teff=14000, z=1.5)
for val in values:
print val
# an independent attempt without recursion
for row in sygri.parameterList:
print row
# prints column description
print sygri.columns
print sygri.get_available_values_fast('teff', logg=2.0, z=2.00)
print sygri.get_available_values_fast('logg', z=2.00) | 864 | 27.833333 | 95 | py |
pyterpol | pyterpol-master/pyterpol_test/test_SyntheticGrid/test_grid_selection_2.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
# create the grid
sygri = pyterpol.SyntheticGrid(debug=True, mode='default')
parlis = sygri.select_parameters(order=4, **{'logg':3.9999999999999982, 'z':1.0, 'teff':16000.})
print parlis
parlis = sygri.deselect_exact(parlis, **{'logg':3.9999999999999982, 'z':1.0, 'teff':16000.})
print parlis
| 399 | 29.769231 | 96 | py |
pyterpol | pyterpol-master/pyterpol_test/test_SyntheticGrid/test_grid_interpolation.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
# create the grid
sygri = pyterpol.SyntheticGrid()
# read properties
sygri.read_list_from_file('b1.dat', columns=['FILENAME', 'TEFF', 'LOGG', 'Z'], family='BSTAR')
sygri.read_list_from_file('p1.dat', columns=['FILENAME', 'TEFF', 'LOGG', 'Z'], family='POLLUX')
# test of dealing with degeneracies - this should return two spectra
sl = sygri.get_all(z=1.0, teff=15000, logg=4.5)
#print sl
# resolve degeneracy - should raise an exception - checked
#sl = sygri.resolve_degeneracy(sl)
## so we set the grid order and now it should return one spectrum - checked
sygri.set_grid_order(['BSTAR', 'POLLUX'])
sl = sygri.resolve_degeneracy(sl)
#print sl['family']
# this should create a list with intensities of individual
# spectra that will be used for interpolation
parlist, vals, keys = sygri.select_and_verify_parameters(teff=15000, logg=2.75, z=1.5, order=2)
for row in parlist:
print row
#spectra = sygri.get_spectra_for_interpolation(parlist, ['logg', 'z', 'teff'])
#for spec in spectra[:10]:
#print spec
#print len(parlist), len(spectra)
#try to interpolate the spectra
#print sygri.interpolate_spectra(parlist, spectra, [3.5, 1.0, 15100])
| 1,262 | 30.575 | 95 | py |
pyterpol | pyterpol-master/pyterpol_test/old_vs_new/pollux_barycentric_interpolator/compare.py | import sys
import numpy as np
import matplotlib.pyplot as plt
speclist = np.loadtxt(sys.argv[1], dtype=str)
plt.figure(figsize=(10,12), dpi=100)
for i,spec in enumerate(speclist):
new = np.loadtxt(spec)
old = np.loadtxt(spec[:-3]+'old')
ind = np.where((old[:,0] >= new[:,0].min()) & (old[:,0] <= new[:,0].max()))[0]
old = old[ind]
#print old
plt.subplot(211)
plt.plot(new[:,0], new[:,1]+i*0.5, 'k-', label=spec[:-4])
plt.plot(old[:,0], old[:,1]+i*0.5, 'r-', label=spec[:-4])
plt.xlim(new[:,0].min(),new[:,0].max())
plt.ylim(0.0,2.2)
plt.legend()
plt.subplot(212)
plt.plot(new[:,0], old[:,1]-new[:,1], 'k-', label=spec[:-4])
plt.xlim(new[:,0].min(),new[:,0].max())
plt.legend()
plt.savefig('comparison.png')
plt.show() | 799 | 26.586207 | 82 | py |
pyterpol | pyterpol-master/pyterpol_test/old_vs_new/pollux_hermite_interpolator/compare.py | import sys
import numpy as np
import matplotlib.pyplot as plt
speclist = np.loadtxt(sys.argv[1], dtype=str)
plt.figure(figsize=(10,12), dpi=100)
for i,spec in enumerate(speclist):
new = np.loadtxt(spec)
old = np.loadtxt(spec[:-3]+'old')
ind = np.where((old[:,0] >= new[:,0].min()) & (old[:,0] <= new[:,0].max()))[0]
old = old[ind]
#print old
plt.subplot(211)
plt.plot(new[:,0], new[:,1]+i*0.5, 'k-', label=spec[:-4])
plt.plot(old[:,0], old[:,1]+i*0.5, 'r-', label=spec[:-4])
plt.xlim(new[:,0].min(),new[:,0].max())
plt.ylim(0.0,2.2)
plt.legend()
plt.subplot(212)
plt.plot(new[:,0], old[:,1]-new[:,1], 'k-', label=spec[:-4])
plt.xlim(new[:,0].min(),new[:,0].max())
plt.legend()
plt.savefig('comparison.png')
plt.show() | 799 | 26.586207 | 82 | py |
pyterpol | pyterpol-master/pyterpol_test/old_vs_new/pollux_univariate_spline/compare.py | import sys
import numpy as np
import matplotlib.pyplot as plt
speclist = np.loadtxt(sys.argv[1], dtype=str)
plt.figure(figsize=(10,12), dpi=100)
for i,spec in enumerate(speclist):
new = np.loadtxt(spec)
old = np.loadtxt(spec[:-3]+'old')
ind = np.where((old[:,0] >= new[:,0].min()) & (old[:,0] <= new[:,0].max()))[0]
old = old[ind]
#print old
plt.subplot(211)
plt.plot(new[:,0], new[:,1]+i*0.5, 'k-', label=spec[:-4])
plt.plot(old[:,0], old[:,1]+i*0.5, 'r-', label=spec[:-4])
plt.xlim(new[:,0].min(),new[:,0].max())
plt.ylim(0.0,2.2)
plt.legend()
plt.subplot(212)
plt.plot(new[:,0], old[:,1]-new[:,1], 'k-', label=spec[:-4])
plt.xlim(new[:,0].min(),new[:,0].max())
plt.legend()
plt.savefig('comparison.png')
plt.show() | 799 | 26.586207 | 82 | py |
pyterpol | pyterpol-master/pyterpol_test/old_vs_new/pollux_adaptive_spline_order/compare.py | import sys
import numpy as np
import matplotlib.pyplot as plt
speclist = np.loadtxt(sys.argv[1], dtype=str)
plt.figure(figsize=(10,12), dpi=100)
for i,spec in enumerate(speclist):
new = np.loadtxt(spec)
old = np.loadtxt(spec[:-3]+'old')
ind = np.where((old[:,0] >= new[:,0].min()) & (old[:,0] <= new[:,0].max()))[0]
old = old[ind]
#print old
plt.subplot(211)
plt.plot(new[:,0], new[:,1]+i*0.5, 'k-', label=spec[:-4])
plt.plot(old[:,0], old[:,1]+i*0.5, 'r-', label=spec[:-4])
plt.xlim(new[:,0].min(),new[:,0].max())
plt.ylim(0.0,2.2)
plt.legend()
plt.subplot(212)
plt.plot(new[:,0], old[:,1]-new[:,1], 'k-', label=spec[:-4])
plt.xlim(new[:,0].min(),new[:,0].max())
plt.legend()
plt.savefig('comparison.png')
plt.show() | 799 | 26.586207 | 82 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interpolation_bstar/test_spectra_interpolation_wrapper.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
import time
# create the grid - custom does nothing
sygri = pyterpol.SyntheticGrid('BSTAR', debug=True)
# interpolate at
wmin = 6500
wmax = 6650
keys = ['Z', 'LOGG', 'TEFF']
values = [1.5, 3.8, 18400]
params = {k:v for k,v in zip(keys, values)}
name = '_'.join([k+'_'+str(v) for k,v in zip(keys, values)])
plt.figure(figsize=(12,5), dpi=100)
spectrum = sygri.get_synthetic_spectrum(params, wmin, wmax)
w,i = spectrum.get_spectrum()
plt.plot(w, i, 'k-')
plt.savefig(name+'.png')
plt.close()
| 600 | 20.464286 | 60 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interpolation_bstar/test_spectra_interpolation_order.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
import time
# create the grid - custom does nothing
sygri = pyterpol.SyntheticGrid('BSTAR', debug=True)
# interpolate at
wmin = 6500
wmax = 6650
keys = ['Z', 'LOGG', 'TEFF']
values = [1.0, 3.8, 18400]
params = {k:v for k,v in zip(keys, values)}
name = '_'.join([k+'_'+str(v) for k,v in zip(keys, values)])
ords = np.arange(2,6)
for order in ords:
spectrum = sygri.get_synthetic_spectrum(params, wmin, wmax, order=order)
w,i = spectrum.get_spectrum()
#plot the spectrum
plt.figure(figsize=(12,10), dpi=100)
plt.plot(w, i, 'k-')
plt.savefig(name+"_order_%s" % str(order)+'.png')
plt.close()
# save the spectrum in text file
np.savetxt(name+"_order_%s" % str(order)+'.dat', np.column_stack([w,i]), fmt="%.4f %.6f")
| 871 | 22.567568 | 93 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interpolation_bstar/compare_orders_z_1.0/compare_orders.py | import sys
import numpy as np
import matplotlib.pyplot as plt
speclist = np.loadtxt(sys.argv[1], dtype=str)
plt.figure(figsize=(15,15), dpi=100)
for i,spec in enumerate(speclist):
new = np.loadtxt(spec)
xlim = (new[:,0].min(),new[:,0].max())
xlim = (6520,6600)
if i == 0:
fst = new.copy()
old = np.loadtxt('Z_1.0_LOGG_3.8_TEFF_18400.old')
ind = np.where((old[:,0] >= new[:,0].min()) & (old[:,0] <= new[:,0].max()))[0]
old = old[ind]
color = np.random.random(3)
plt.subplot(311)
plt.plot(new[:,0], new[:,1]+i*0.1, '-', label=spec[:-4], color=color)
plt.xlim(*xlim)
plt.ylim(0.0,2.2)
plt.legend(fontsize=10)
plt.subplot(312)
plt.plot(new[:,0], fst[:,1]-new[:,1], '-', label=spec[:-4], color=color)
plt.xlim(*xlim)
plt.legend(fontsize=10)
plt.subplot(313)
plt.plot(new[:,0], old[:,1]-new[:,1], '-', label=spec[:-4], color=color)
plt.xlim(*xlim)
plt.legend(fontsize=10)
plt.savefig('comparison.png')
plt.show() | 1,015 | 25.051282 | 79 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interpolation_bstar/compare_orders_z_1.5/compare_orders.py | import sys
import numpy as np
import matplotlib.pyplot as plt
speclist = np.loadtxt(sys.argv[1], dtype=str)
plt.figure(figsize=(15,15), dpi=100)
for i,spec in enumerate(speclist):
new = np.loadtxt(spec)
xlim = (new[:,0].min(),new[:,0].max())
xlim = (6520,6600)
if i == 0:
fst = new.copy()
old = np.loadtxt('Z_1.5_LOGG_3.8_TEFF_18400.old')
ind = np.where((old[:,0] >= new[:,0].min()) & (old[:,0] <= new[:,0].max()))[0]
old = old[ind]
color = np.random.random(3)
plt.subplot(311)
plt.plot(new[:,0], new[:,1]+i*0.1, '-', label=spec[:-4], color=color)
plt.xlim(*xlim)
plt.ylim(0.0,2.2)
plt.legend(fontsize=10)
plt.subplot(312)
plt.plot(new[:,0], fst[:,1]-new[:,1], '-', label=spec[:-4], color=color)
plt.xlim(*xlim)
plt.legend(fontsize=10)
plt.subplot(313)
plt.plot(new[:,0], old[:,1]-new[:,1], '-', label=spec[:-4], color=color)
plt.xlim(*xlim)
plt.legend(fontsize=10)
plt.savefig('comparison.png')
plt.show() | 1,015 | 25.051282 | 79 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interpolation_pollux/test_spectra_interpolation_wrapper.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
import time
# create the grid - custom does nothing
sygri = pyterpol.SyntheticGrid('POLLUX', debug=True)
# interpolate at
wmin = 6500
wmax = 6650
keys = ['Z', 'LOGG', 'TEFF']
values = [1.0, 4.0, 12000]
params = {k:v for k,v in zip(keys, values)}
name = '_'.join([k+'_'+str(v) for k,v in zip(keys, values)])
plt.figure(figsize=(12,5), dpi=100)
spectrum = sygri.get_synthetic_spectrum(params, wmin, wmax)
w,i = spectrum.get_spectrum()
plt.plot(w, i, 'k-')
plt.savefig(name+'.png')
plt.close()
| 601 | 20.5 | 60 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interpolation_pollux/compare.py | import sys
import numpy as np
import matplotlib.pyplot as plt
speclist = np.loadtxt(sys.argv[1], dtype=str)
plt.figure(figsize=(10,12), dpi=100)
for i,spec in enumerate(speclist):
new = np.loadtxt(spec)
old = np.loadtxt(spec[:-3]+'old')
ind = np.where((old[:,0] >= new[:,0].min()) & (old[:,0] <= new[:,0].max()))[0]
old = old[ind]
#print old
plt.subplot(211)
plt.plot(new[:,0], new[:,1]+i*0.5, 'k-', label=spec[:-4])
plt.plot(old[:,0], old[:,1]+i*0.5, 'r-', label=spec[:-4])
plt.xlim(new[:,0].min(),new[:,0].max())
plt.ylim(0.0,2.2)
plt.legend()
plt.subplot(212)
plt.plot(new[:,0], old[:,1]-new[:,1], 'k-', label=spec[:-4])
plt.xlim(new[:,0].min(),new[:,0].max())
plt.legend()
plt.savefig('comparison.png')
plt.show() | 799 | 26.586207 | 82 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interpolation_pollux/test_spectra_interpolation.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
import time
# create the grid - custom does nothing
sygri = pyterpol.SyntheticGrid('POLLUX', debug=True)
# interpolate at
keys_0 = ['Z', 'LOGG', 'TEFF']
values = [[1.0, 4.0, 12000],[1.0, 4.0, 12200],[1.0, 3.8, 12200]]
for vals in values:
props = {}
for k, v in zip(keys_0, vals):
props[k] = v
wmin = 6500
wmax = 6650
# select parameters
parlist, vals, keys = sygri.select_and_verify_parameters(order=3, **props)
# select corresponding spectra
spectra = sygri.get_spectra_for_interpolation(parlist, keys, wmin=wmin, wmax=wmax)
# now does the interpolation
intens = sygri.interpolate_spectra(parlist, spectra, vals)
first={}
for val, key in zip(parlist[0], keys):
first[key] = val
# choose one representative
wmin, wmax, step = sygri.get_all(**first)[0].get_size()
# get the wavelength vector
wave = np.arange(wmin, wmax+step/2., step)
# plot the result
name = "_".join([key+'_'+str(props[key]) for key in props.keys()])
plt.plot(wave, intens)
plt.savefig(name+'.png')
plt.close()
# save data
np.savetxt(name+'.dat', np.column_stack([wave, intens]), fmt="%.4f %.6f")
| 1,322 | 26.5625 | 86 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interpolation_pollux/test_spectra_interpolation_order.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
import time
# create the grid - custom does nothing
sygri = pyterpol.SyntheticGrid('POLLUX', debug=True)
# interpolate at
wmin = 6500
wmax = 6650
keys = ['Z', 'LOGG', 'TEFF']
values = [1.0, 3.8, 12200]
params = {k:v for k,v in zip(keys, values)}
name = '_'.join([k+'_'+str(v) for k,v in zip(keys, values)])
ords = np.arange(2,6)
for order in ords:
spectrum = sygri.get_synthetic_spectrum(params, wmin, wmax, order=order)
w,i = spectrum.get_spectrum()
#plot the spectrum
plt.figure(figsize=(12,5), dpi=100)
plt.plot(w, i, 'k-')
plt.savefig(name+"_order_%s" % str(order)+'.png')
plt.close()
# save the spectrum in text file
np.savetxt(name+"_order_%s" % str(order)+'.dat', np.column_stack([w,i]), fmt="%.4f %.6f")
| 871 | 22.567568 | 93 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interpolation_pollux/test_spectra_loading.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
import time
# create the grid - custom does nothing
sygri = pyterpol.SyntheticGrid('POLLUX')
# try to load a spectrum
keys = ['Z', 'LOGG', 'TEFF']
vals = [1.0, 4.0, 13000]
wmin = 6400
wmax = 6600
# create a dictionary for future look up
specs= {}
for k, v in zip(keys, vals):
specs[k] = v
# load the spectra
t0 = time.time()
print sygri.get_spectra_for_interpolation([vals], keys, wmin=wmin, wmax=wmax)
t1 = time.time()
print t1-t0
# load them again
t0 = time.time()
print sygri.get_spectra_for_interpolation([vals], keys, wmin=wmin, wmax=wmax)
t1 = time.time()
print t1-t0
# check that the spectrum remained loaded - and it did :D
spectrum = sygri.get_all(**specs)[0]
print spectrum.loaded
| 810 | 20.342105 | 77 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interpolation_pollux/compare_orders/compare_orders.py | import sys
import numpy as np
import matplotlib.pyplot as plt
speclist = np.loadtxt(sys.argv[1], dtype=str)
plt.figure(figsize=(10,15), dpi=100)
for i,spec in enumerate(speclist):
new = np.loadtxt(spec)
if i == 0:
fst = new.copy()
old = np.loadtxt('LOGG_3.8_Z_1.0_TEFF_12200.old')
ind = np.where((old[:,0] >= new[:,0].min()) & (old[:,0] <= new[:,0].max()))[0]
old = old[ind]
color = np.random.random(3)
plt.subplot(311)
plt.plot(new[:,0], new[:,1]+i*0.1, '-', label=spec[:-4], color=color)
plt.xlim(new[:,0].min(),new[:,0].max())
plt.ylim(0.0,2.2)
plt.legend()
plt.subplot(312)
plt.plot(new[:,0], fst[:,1]-new[:,1], '-', label=spec[:-4], color=color)
plt.xlim(new[:,0].min(),new[:,0].max())
plt.legend()
plt.subplot(313)
plt.plot(new[:,0], old[:,1]-new[:,1], '-', label=spec[:-4], color=color)
plt.xlim(new[:,0].min(),new[:,0].max())
plt.legend()
plt.savefig('comparison.png')
plt.show() | 983 | 26.333333 | 79 | py |
pyterpol | pyterpol-master/pyterpol_test/test_ObservedSpectra/test_osc.py | import sys
sys.path.append('/home/jana/work/')
import pyterpol
import numpy as np
obsfile = 'ua170034.asc'
w, i = np.loadtxt(obsfile, unpack=True, usecols=[0, 1])
e = np.random.random(len(w)) * 0.01
# No spectrum is passed - only some warnings are issued - checked
#os = pyterpol.ObservedSpectrum()
# spectrum is passed - only warnings regarding errors are issued - checked
#os = pyterpol.ObservedSpectrum(wave=w, intens=i)
# spectrum is passed with filename -- warnings regarding error bars issued
#os = pyterpol.ObservedSpectrum(filename=obsfile)
# errors are also passed - locally and globally - checked
#os = pyterpol.ObservedSpectrum(wave=w, intens=i, error=e)
#print os.error
#os = pyterpol.ObservedSpectrum(wave=w, intens=i, error=0.01)
#print os.error
# errors are passed after creation of grid - checked
#os = pyterpol.ObservedSpectrum(wave=w, intens=i)
#os.set_error(vec_error=e)
#print os.error
#os.set_error(global_error=0.01)
#print os.error
# everything is passed after creation of type - checked
#os = pyterpol.ObservedSpectrum()
#os.set_spectrum_from_arrays(w,i,e)
#print os.error
# check that the spectrum is measured properly - checked
#print w[0], w[-1], os.get_boundaries()
# free the spectrum - checked
#os.free_spectrum()
#print os.wave, os.intens, os.error, os.loaded
# tests some precautions
# what if the intensities and wavelengths do not have the same length -checked
#os.set_spectrum_from_arrays(w[:-3],i,e)
# what if we set up korel, but do not set up component - checked
#os = pyterpol.ObservedSpectrum(wave=w, intens=i, error=e, korel=True)
# try to estimate the error - this is something I'll have to think about a bit more
os = pyterpol.ObservedSpectrum(wave=w, intens=i, error=e)
#err_cont = os.get_sigma_from_continuum(4453, 4459)
#print err_cont, os.error
#err_fft = os.get_sigma_from_fft(nlast=50)
#print err_fft, os.error
# check that groups are working
os.set_group(dict(rv=1))
print os.get_group('rv')
print os.get_group('teff')
print os
| 1,997 | 29.738462 | 90 | py |
pyterpol | pyterpol-master/pyterpol_test/hd81357/hd81357.py | import numpy as np
import pyterpol
# 1) Generate region
rl = pyterpol.RegionList()
rl.add_region(wmin=6450, wmax=6650)
# 2) Generate components
sl = pyterpol.StarList()
# compute rotational velocity
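# (v sin i = 2*pi*R*sin(i)/P; the constant 50.57877 below presumably converts
#  R in solar radii and P in days to km/s, i.e. 2*pi*R_sun[km] / 86400 s)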
prot = 33.77409
r_point = 14.68
i = np.radians(62.7)
vsini = 50.57877*r_point*np.sin(i)/prot
print vsini
# add components
sl.add_component(component='secondary', teff=4200., logg=1.86, vrot=vsini, lr=1.0)
# construct the interface
itf = pyterpol.Interface(sl=sl, rl=rl)
itf.setup()
# write and plot the spectra
itf.write_synthetic_spectra(korel=True, outputname='upper_limit')
itf.populate_comparisons()
itf.plot_all_comparisons(figname='upper_limit')
# change to the lower limit
itf.set_parameter(component='secondary', parname='logg', value=1.5)
# write and plot the spectra
itf.write_synthetic_spectra(korel=True, outputname='lower_limit')
itf.populate_comparisons()
itf.plot_all_comparisons(figname='lower_limit')
itf.save('hd81357.final.txt')
| 962 | 20.4 | 82 | py |
pyterpol | pyterpol-master/pyterpol_test/hd81357/fitting/eval_res.py | import numpy as np
import matplotlib.pyplot as plt
import pyterpol
itf = pyterpol.Interface()
itf.load('hd81357.sav')
itf.populate_comparisons()
# print itf.get_degrees_of_freedom()
# print itf.compute_chi2_treshold()
# try to load the data
data = pyterpol.read_fitlog('fit.log')
# try to plot the convergence
itf.plot_convergence(parameter='all')
# try to plot covariances
itf.plot_covariances(nbin=10, parameters=['teff', 'vrot', 'lr'])
| 447 | 18.478261 | 64 | py |
pyterpol | pyterpol-master/pyterpol_test/hd81357/fitting/hd81357.py | import numpy as np
import pyterpol
# 1) Generate region
rl = pyterpol.RegionList()
rl.add_region(wmin=6324, wmax=6424)
rl.add_region(wmin=4380, wmax=4497)
# 2) Generate observed data
ol = pyterpol.ObservedList()
obs = [
dict(filename='DE_blue02_n.dat', component='secondary', korel=True, error=0.01),
dict(filename='DE_red02_n.dat', component='secondary', korel=True, error=0.01)
]
# append the observations
ol.add_observations(obs)
# 3) Generate components
sl = pyterpol.StarList()
# add components
sl.add_component(component='secondary', teff=4200., logg=1.70, vrot=20., lr=0.2)
# 4) construct the interface
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
itf.set_grid_properties(order=2)
itf.setup()
# 5) adjust parameters
itf.set_parameter(component='secondary', parname='teff', fitted=True, vmin=4005., vmax=5000.)
# itf.set_parameter(component='secondary', parname='logg', fitted=True, vmin=1.0, vmax=2.5)
itf.set_parameter(component='secondary', parname='vrot', fitted=True, vmin=10., vmax=30.)
itf.set_parameter(component='secondary', parname='lr', fitted=True, vmin=0.05, vmax=0.4)
itf.set_parameter(component='secondary', parname='lr', group=1, fitted=True, value=0.10, vmin=0.05, vmax=0.4)
itf.set_parameter(component='secondary', parname='rv', fitted=True, vmin=-20.0, vmax=20.0)
# 6) choose a fitting environment
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-6)
# 7) run fitting
itf.run_fit()
# 8) plot result
itf.plot_all_comparisons(figname='final')
itf.write_synthetic_spectra()
# 9) save the fit
itf.save('hd81357.sav')
| 1,557 | 28.961538 | 109 | py |
pyterpol | pyterpol-master/pyterpol_test/test_ObservedList/test02.py | """
Test of the submodule ObservedList in fitting.
Testing querying of spectra.
"""
import numpy as np
from pyterpol.fitting.interface import ObservedList
# define the type
ol = ObservedList()
# build a list of observations
obs = [
dict(filename='o.asc', group=dict(rv=0, teff=0)),
dict(filename='o.asc', group=dict(rv=0, teff=1)),
dict(filename='o.asc'),
dict(filename='o.asc', component='primary', korel=True, group=dict(logg=2))
]
# attach the spectra again
ol.add_observations(obs)
print ol
# try some queries - first groups
osl = ol.get_spectra(verbose=True, teff=1) # -- correct
print ObservedList(observedSpectraList=osl, debug=True)
osl = ol.get_spectra(verbose=True, rv=0) # -- correct
print ObservedList(observedSpectraList=osl, debug=True)
# now wavelengths
osl = ol.get_spectra(verbose=True, wmin=4300) # -- correct
print ObservedList(observedSpectraList=osl, debug=True)
osl = ol.get_spectra(verbose=True, wmin=4300, wmax=4500) # -- correct
print ObservedList(observedSpectraList=osl, debug=True)
# osl = ol.get_spectra(verbose=True, wmin=4300, wmax=5000) # -- correctly incorrect
# print ObservedList(observedSpectraList=osl, debug=True)
# components for a change
osl = ol.get_spectra(verbose=True, component='ALL') # -- correct
print ObservedList(observedSpectraList=osl, debug=True)
# korel
osl = ol.get_spectra(verbose=True, korel=True) # -- correct
print ObservedList(observedSpectraList=osl, debug=True)
# and some mixture
osl = ol.get_spectra(verbose=True, component='ALL', rv=1) # -- correct
print ObservedList(observedSpectraList=osl, debug=True)
# try to query groups based on component
groups = ol.get_groups_for_components(['primary', 'ALL'])
print groups | 1,782 | 32.018519 | 86 | py |
pyterpol | pyterpol-master/pyterpol_test/test_ObservedList/test01.py | """
Test of the submodule ObservedList in fitting.
"""
import numpy as np
from pyterpol.fitting.interface import ObservedList
# define the type
ol = ObservedList(debug=True)
# add some observations
ol.add_one_observation(filename='o.asc', group=dict(rv=0, teff=0))
ol.add_one_observation(filename='o.asc', group=dict(rv=0, teff=1))
ol.add_one_observation(filename='o.asc')
ol.add_one_observation(filename='o.asc')
# list the class
print ol
# clear the class
ol.clear_all()
# build a list of observations
obs = [
dict(filename='o.asc', group=dict(rv=0, teff=0)),
dict(filename='o.asc', group=dict(rv=0, teff=1)),
dict(filename='o.asc'),
dict(filename='o.asc')
]
# attach the spectra again
ol.add_observations(obs)
# list the class
print ol
# check the list of the observed spectra
print ol.observedSpectraList | 832 | 21.513514 | 66 | py |
pyterpol | pyterpol-master/pyterpol_test/test_ObservedList/test_save.py | """
Test of the observedlist save/load
"""
import numpy as np
from pyterpol.fitting.interface import ObservedList
# define the type
ol = ObservedList()
# build a list of observations
obs = [
dict(filename='o.asc', group=dict(rv=[0, 1], teff=0), error=0.01),
dict(filename='o.asc', group=dict(rv=0, teff=1)),
dict(filename='o.asc'),
dict(filename='o.asc', component='primary', korel=True, group=dict(logg=2))
]
# attach the spectra again
ol.add_observations(obs)
# save the class
ol.save('save_ol.txt')
print ol
# clear it
ol.clear_all()
print ol
# and load it again
ol.load('save_ol.txt')
print ol | 603 | 19.133333 | 79 | py |
pyterpol | pyterpol-master/pyterpol_test/test_RegionList/test02.py | """
Try out communication ObservedList -> RegionList -> StarList
"""
import pyterpol
debug = True
# 1) Observations
ol = pyterpol.ObservedList(debug=debug)
obslist = [
dict(filename='o.asc', component='secondary', error=0.01),
dict(filename='o.asc', component='primary', error=0.01),
dict(filename='o.asc', error=0.01),
]
ol.add_observations(obslist)
print ol
#2) Region
rl = pyterpol.RegionList(debug=debug)
# try to define region from observed
rl.get_regions_from_obs(ol.get_spectra())
# print rl
rl.clear_all()
# add regions
rl.add_region(identification='myfirst', wmin=0., wmax=1.)
rl.add_region(identification='mysecond', component='primary', wmin=10., wmax=20.)
rl.add_region(identification='mysecond', component='secondary', wmin=10., wmax=20., groups=dict(teff=1))
print rl
# 3) StarList
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=3.5, z=1.0)
sl.add_component(component='secondary', teff=20000., logg=3.5, z=1.0)
print sl
# Communicate the region and rv settings
components = sl.get_components()
components.append('all')
sl.set_groups(ol.get_data_groups(components))
print sl
#
# sl.set_groups(rl.get_region_groups())
| 1,212 | 21.886792 | 104 | py |
pyterpol | pyterpol-master/pyterpol_test/test_RegionList/test01.py | """
Basic testing (creation and attachment of regions) of the RegionList class
"""
import pyterpol
# type
rl = pyterpol.RegionList(debug=True)
# add regions
# the simplest case
rl.add_region(identification='myfirst', wmin=0., wmax=1.) #- correct
# rl.add_region() #- correctly incorrect
# try to add the same region by name
rl.add_region(identification='myfirst') # - correctly unregistered, because it has the same name
rl.add_region(wmin=0., wmax=1.) # - correctly unregistered, because it is the same region
rl.add_region(component='primary', wmin=0., wmax=1.) # - correctly unregistered, because this region was defined for all
# try to add one component - looks good
rl.add_region(identification='mysecond', component='primary', wmin=10., wmax=20.)
rl.add_region(identification='mysecond', component='secondary', wmin=10., wmax=20.)
# try to pass some groups along with the rest
rl.add_region(identification='mythird', wmin=100., wmax=200., groups=dict(teff=1))
rl.add_region(identification='myfourth',component='primary', wmin=100., wmax=200., groups=dict(teff=0))
rl.add_region(identification='myfourth',component='secondary', wmin=100., wmax=200., groups=dict(teff=1))
print rl
# print rl.get_defined_groups()
print rl.get_region_groups()
| 1,258 | 36.029412 | 120 | py |
pyterpol | pyterpol-master/pyterpol_test/test_RegionList/test_save.py | """
Test saving of the class.
"""
import pyterpol
rl = pyterpol.RegionList()
rl.add_region(component='primary', identification='r1', wmin=4500., wmax=4600.)
rl.add_region(component='primary', identification='r2', wmin=4500., wmax=4700.)
rl.add_region(component='primary', identification='r3', wmin=4500., wmax=4800.)
rl.add_region(component='secondary', identification='r4', wmin=4500., wmax=4600.)
rl.add_region(component='secondary', identification='r5', wmin=4500., wmax=4800.)
print rl
rl.save('region_list.txt')
rl.clear_all()
print rl
rl.load('region_list.txt')
print rl
| 590 | 18.7 | 81 | py |
pyterpol | pyterpol-master/pyterpol_test/test_StarList/test02.py | """
Test of group assignment.
"""
import pyterpol
# build the class and add some components
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=15000., logg=4.5, vrot=0.0, rv=-20.)
sl.add_component(component='secondary', teff=15000., logg=4.5, vrot=0.0, rv=-20.)
# print sl
# add two groups for rv - cloning works
sl.clone_parameter('primary', 'rv', group=1)
sl.clone_parameter('primary', 'rv', group=2)
# define some groups
groups = dict(primary=dict(rv=[0, 1]), all=dict(rv=[10, 11], teff=[0, 1]))
sl.set_groups(groups)
print sl | 550 | 26.55 | 81 | py |
pyterpol | pyterpol-master/pyterpol_test/test_StarList/test01.py | """
Test of initialization of the class StarList
"""
import pyterpol
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=15000., logg=4.5, vrot=0.0, rv=-20.)
sl.add_component(component='secondary', teff=15000., logg=4.5, vrot=0.0, rv=-20.)
print sl
sl.clear()
# Try to leave one parameter unset (teff=None)
sl.add_component(component='primary', teff=None, logg=4.5, vrot=0.0, rv=-20.)
# Try not to setup anything
sl.add_component()
# Try to pass a nonsensical parameter
sl.add_component(pes='cerny')
print sl | 517 | 21.521739 | 81 | py |
pyterpol | pyterpol-master/pyterpol_test/test_StarList/test03.py | """
Test of communication between StarList and ObservedList
"""
import pyterpol
# build the class and add some components
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=15000., logg=4.5, vrot=0.0, rv=-20., groups=dict(z=1))
sl.add_component(component='secondary', teff=15000., logg=4.5, vrot=0.0, rv=-20., groups=dict(z=0))
# create some data
ol = pyterpol.ObservedList()
# create some observations
obs = [
dict(filename='o.asc'),
dict(filename='o.asc'),
dict(filename='o.asc', group=dict(teff=1))
]
ol.add_observations(obs)
# now query the groups and pass them to starlist
sl.set_groups(ol.get_data_groups(['primary','secondary', 'ALL']))
print sl
# completely overwrite default groups
sl.set_groups(ol.get_data_groups(['primary','secondary', 'ALL']), overwrite=True)
print sl
# return groups common for all components
print "Common groups:", sl.get_common_groups() | 907 | 26.515152 | 99 | py |
pyterpol | pyterpol-master/pyterpol_test/test_StarList/test_save.py | import pyterpol
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=17000., logg=4.0, rv=-100.0, z=1.0, vrot=40.0, lr=0.35)
sl.add_component(component='secondary', teff=26000., logg=4.0, rv=100.0, z=1.0, vrot=140.0, lr=0.65)
sl.set_parameter(component='primary', group=0, name='rv', value=-123123., vmin=-1e6, vmax=1e6, fitted=True)
sl.save('save_starlist.txt')
sl.clear()
print sl
sl.load('save_starlist.txt')
print sl
| 439 | 30.428571 | 107 | py |
pyterpol | pyterpol-master/pyterpol_test/test_fwhm/test.py | import pyterpol
def main():
"""
:return:
"""
## 1) Generate region
rl = pyterpol.RegionList()
rl.add_region(wmin=6337., wmax=6410.0, groups=dict(lr=0))
rl.add_region(wmin=6530., wmax=6600.0, groups=dict(lr=0))
rl.add_region(wmin=6660., wmax=6690.0, groups=dict(lr=0))
## 2) Load observed data
# first wrap them into observed spectrum class
o1 = pyterpol.ObservedSpectrum(filename='blb00001.clean.asc', instrumental_width=0.46, group=dict(rv=0))
o2 = pyterpol.ObservedSpectrum(filename='blb00002.clean.asc', instrumental_width=0.46, group=dict(rv=1))
o1.get_sigma_from_continuum(6630., 6640.)
o2.get_sigma_from_continuum(6630., 6640.)
# create list of observed spectra
ol = pyterpol.ObservedList()
ol.add_observations([o1, o2])
# alternatively
ol = pyterpol.ObservedList()
obs = [dict(filename='blb00001.clean.asc', instrumental_width=0.46, group=dict(rv=0), error=0.01),
dict(filename='blb00002.clean.asc', instrumental_width=0.46, group=dict(rv=1), error=0.01)]
ol.add_observations(obs)
## 3) Generate components
sl = pyterpol.StarList()
sl.add_component('primary', teff=16000., logg=4.285, lr=1.0, vrot=90., z=1.0)
## 4) construct the interface
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
itf.set_grid_properties(order=4, step=0.05)
itf.setup()
## 5) Set parameters for fitting
itf.set_parameter(parname='teff', vmin=14000., vmax=17000., fitted=True)
itf.set_parameter(parname='logg', vmin=3.5, vmax=4.5, fitted=True)
itf.set_parameter(parname='rv', vmin=-30., vmax=0., fitted=True)
itf.set_parameter(parname='vrot', vmin=70., vmax=110., fitted=True)
itf.set_parameter(parname='lr', vmin=0.99, vmax=1.01, fitted=True)
## 6) Choose fitting environment
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-4)
## 7) Run the fitting
itf.run_fit()
# write down rvs
itf.write_rvs('hd.rvs')
## 8) plot and save results
itf.plot_all_comparisons(figname='final')
itf.save('test.final.sav')
if __name__ == '__main__':
main()
| 2,126 | 32.761905 | 108 | py |
pyterpol | pyterpol-master/pyterpol_test/junk/test.py | import numpy as np
from scipy.interpolate import splrep, splev, interp1d
import matplotlib.pyplot as plt
x = np.linspace(0, 2*np.pi, 10)
y = np.sin(x)
xnew = np.linspace(0, 2*np.pi, 100)
prec = np.sin(xnew)
tck = splrep(x, y, k=3)
int_splev = splev(xnew, tck)
f = interp1d(x, y, kind='cubic')
int_interp1d = f(xnew)
plt.subplot(211)
plt.plot(x, y, 'ro', label='original')
plt.plot(xnew, prec, 'r-', label='precise')
plt.plot(xnew, int_splev, 'b-', label='splev')
plt.plot(xnew, int_interp1d, 'm-', label='interp1d')
plt.legend()
plt.subplot(212)
plt.plot(xnew, prec-int_splev, 'b-', label='splev')
plt.plot(xnew, prec-int_interp1d, 'm-', label='interp1d')
plt.legend()
plt.show() | 685 | 23.5 | 57 | py |
pyterpol | pyterpol-master/pyterpol_test/test_defaults/test_grid_defaults.py | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../')
import pyterpol
# create the grid - custom does nothing
sygri = pyterpol.SyntheticGrid()
print sygri
## create the grid - BSTAR does nothing
sygri = pyterpol.SyntheticGrid('BSTAR')
print sygri
## create the grid - OSTAR does nothing
sygri = pyterpol.SyntheticGrid('OSTAR')
print sygri
## create the grid - POLLUX does nothing
sygri = pyterpol.SyntheticGrid('POLLUX')
print sygri
## create the grid - POLLUX does nothing
sygri = pyterpol.SyntheticGrid('AMBRE')
print sygri
## create the grid - DEFAULT does nothing
sygri = pyterpol.SyntheticGrid('DEFAULT')
print sygri
| 664 | 19.78125 | 41 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interface/test04.py | """
Testing of RV group
"""
import pyterpol
import matplotlib.pyplot as plt
debug=False
# 1) define some observations
ol = pyterpol.ObservedList(debug=debug)
# obs = [
# dict(filename='o.asc', error=0.01, group=dict(rv=10)),
# dict(filename='o2.asc', error=0.01, group=dict(rv=10))
# ]
obs = [
dict(filename='o.asc', error=0.01),
dict(filename='o2.asc', error=0.01)
]
ol.add_observations(obs)
# print ol
# 2) define fitted regions
rl = pyterpol.RegionList(debug=debug)
rl.add_region(wmin=4300, wmax=4380)
rl.add_region(wmin=4460, wmax=4500, groups={'teff':1})
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=0.0, vrot=0.0, lr=1.0, z=1.0)
sl.add_component(component='secondary', teff=20000., logg=4.5, rv=0.0, vrot=0.0, lr=1.0, z=1.0)
# 4) define interface
itf = pyterpol.Interface(ol=ol, rl=rl, sl=sl, debug=True)
itf.setup()
# check the interface
print itf
# Start building the table for spectra creation
itf.ready_comparisons()
# check the list of comparisons
print itf.list_comparisons()
# plot the synthetic spectra
itf.synthetics['region00']['primary'].plot(savefig=True)
itf.synthetics['region01']['primary'].plot(savefig=True)
# plot the observed spectra
itf.get_observed_spectrum('o.asc').plot(savefig=True)
itf.get_observed_spectrum('o2.asc').plot(savefig=True)
| 1,398 | 23.982143 | 95 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interface/test05.py | """
Testing the comparison of synthetic and observed spectra.
"""
import pyterpol
import matplotlib.pyplot as plt
debug=False
# 1) define some observations
ol = pyterpol.ObservedList(debug=debug)
# obs = [
# dict(filename='o.asc', error=0.01, group=dict(rv=10)),
# dict(filename='o2.asc', error=0.01, group=dict(rv=10))
# ]
obs = [
dict(filename='o.asc', error=0.01),
dict(filename='o2.asc', error=0.01)
]
ol.add_observations(obs)
# print ol
# 2) define fitted regions
rl = pyterpol.RegionList(debug=debug)
rl.add_region(wmin=4300, wmax=4380)
rl.add_region(wmin=4460, wmax=4500, groups={'teff':1})
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=-30., vrot=150., lr=0.5, z=1.0)
sl.add_component(component='secondary', teff=20000., logg=4.5, rv=30., vrot=10., lr=0.5, z=1.0)
# 4) define interface
itf = pyterpol.Interface(ol=ol, rl=rl, sl=sl, debug=True)
itf.setup()
# check the interface
print itf
# Start building the table for spectra creation
itf.ready_comparisons()
# check the list of comparisons
print itf.list_comparisons()
# try to populate comparisons
itf.populate_comparisons()
# do some plots of the comparisons
for i in range(0, len(itf.comparisonList)):
itf.plot_comparison(i, savefig=True)
| 1,336 | 23.309091 | 95 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interface/test02.py | """
Testing of creation of combination list.
"""
import pyterpol
debug=False
# 1) define some observations
ol = pyterpol.ObservedList(debug=debug)
obs = [dict(filename='o.asc', error=0.01),
dict(filename='o2.asc', error=0.01)]
ol.add_observations(obs)
# 2) define fitted regions
rl = pyterpol.RegionList(debug=debug)
rl.add_region(wmin=4300, wmax=4380)
rl.add_region(wmin=4460, wmax=4500)
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=0.0, vrot=0.0, lr=1.0, z=1.0)
# 4) define interface
itf = pyterpol.Interface(ol=ol, rl=rl, sl=sl, debug=True)
# 5) communicate
itf.setup_groups()
print itf
# 6) generate combinations
itf.get_combinations()
| 732 | 19.361111 | 93 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interface/test01.py | import pyterpol
debug=False
# 1) define some observations
ol = pyterpol.ObservedList(debug=debug)
obs = [dict(filename='o.asc', error=0.01),
dict(filename='o2.asc', error=0.01)]
ol.add_observations(obs)
# 2) define fitted regions
rl = pyterpol.RegionList(debug=debug)
rl.add_region(wmin=4300, wmax=4380)
rl.add_region(wmin=4460, wmax=4500)
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=0.0, vrot=0.0, lr=1.0, z=1.0)
# 4) define interface
itf = pyterpol.Interface(ol=ol, rl=rl, sl=sl, debug=True)
# 5) communicate
itf.setup_groups()
| 619 | 22.846154 | 93 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interface/test03.py | """
Testing creation of the class and settings of groups.
"""
import pyterpol
debug=False
# 1) define some observations
ol = pyterpol.ObservedList(debug=debug)
obs = [
dict(filename='o.asc', error=0.01, group=dict(rv=10)),
dict(filename='o2.asc', error=0.01, group=dict(rv=11))
]
# obs = [
# dict(filename='o.asc', error=0.01,),
# dict(filename='o2.asc', error=0.01)
# ]
ol.add_observations(obs)
print ol
# 2) define fitted regions
rl = pyterpol.RegionList(debug=debug)
rl.add_region(wmin=4300, wmax=4380)
rl.add_region(wmin=4460, wmax=4500, groups={'teff':1})
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=0.0, vrot=0.0, lr=1.0, z=1.0)
sl.add_component(component='secondary', teff=20000., logg=4.5, rv=0.0, vrot=0.0, lr=1.0, z=1.0)
# 4) define interface
itf = pyterpol.Interface(ol=ol, rl=rl, sl=sl, debug=True)
# 5) communicate groups
itf.setup_groups()
itf.clear_all()
# 5) communicate groups - without attaching the data
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=0.0, vrot=0.0, lr=1.0, z=1.0)
sl.add_component(component='secondary', teff=20000., logg=4.5, rv=0.0, vrot=0.0, lr=1.0, z=1.0)
itf = pyterpol.Interface(sl=sl, rl=rl, debug=True)
itf.setup_groups()
| 1,364 | 28.042553 | 95 | py |
pyterpol | pyterpol-master/pyterpol_test/test_interface/test_save.py | """
Testing the comparison of synthetic and observed spectra.
"""
import pyterpol
import matplotlib.pyplot as plt
debug=False
# 1) define some observations
ol = pyterpol.ObservedList(debug=debug)
obs = [dict(filename='o.asc', error=0.01, component='primary', korel=True),
dict(filename='o2.asc', error=0.01, component='secondary', korel=True)]
ol.add_observations(obs)
# 2) define fitted regions
rl = pyterpol.RegionList(debug=debug)
rl.add_region(wmin=4300, wmax=4380)
rl.add_region(wmin=4460, wmax=4500, groups={'teff':1})
# 3) define a star
sl = pyterpol.StarList(debug=debug)
sl.add_component(component='primary', teff=20000., logg=4.5, rv=-30., vrot=150., lr=0.5, z=1.0)
sl.add_component(component='secondary', teff=20000., logg=4.5, rv=30., vrot=10., lr=0.5, z=1.0)
# 4) define interface
itf = pyterpol.Interface(ol=ol, rl=rl, sl=sl, debug=False)
itf.setup()
itf.set_parameter(parname='rv', fitted=True)
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-5)
print itf
# 5) save the interface
itf.save('itf_save.txt')
itf.clear_all()
# print itf
# 6) load the interface
itf.load('itf_save.txt')
itf.populate_comparisons()
itf.plot_all_comparisons()
| 1,166 | 26.139535 | 95 | py |
pyterpol | pyterpol-master/observed/observations.py | import warnings
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import splrep
from scipy.interpolate import splev
# repeat userwarnings
warnings.simplefilter('always', UserWarning)
class ObservedSpectrum:
"""
A wrapper class for the observed spectra.
"""
def __init__(self, wave=None, intens=None, error=None, filename=None,
component='all', korel=False, group=None, debug=False,
instrumental_width=0.0, **kwargs):
"""
        Sets up the class.
:param wave: wavelength vector (typically in angstrom)
:param intens: intensity vector (typically relative)
:param error: either error vector, or one value that will apply for whole spectrum
        :param filename: ascii file (2 or 3 columns - wave, intens, error) with the data
:param component: components in the spectrum -- by default set to 'all'
:param korel: flag defining that spectrum was obtained with KOREL - by default false
:param group: different spectra can be grouped under certain parameter
                      e.g. group=dict(rv=1) means that the rv denoted by group one will
be assigned to this spectrum. This is convenient if for
example the same RV is assigned to a set of spectra.
:param instrumental_width: width of the instrumental profile from which the instrumental
broadening is computed in Angstrom (or any other wavelength in
which the observed spectra are calibrated). By default it
is zero.
:param hjd: Heliocentric Julian date can be assigned to each observed
spectrum.
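        Illustrative example (the file name and values are placeholders only):
            obs = ObservedSpectrum(filename='spectrum.asc', error=0.01,
                                   group=dict(rv=0), instrumental_width=0.46)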
"""
        # empty arrays that will be filled
# with read_size
self.wmin = None
self.wmax = None
self.step = None
self.npixel = None
# pass all arguments
self.wave = wave
self.intens = intens
# lets have a look at the errors
if error is None:
warnings.warn("I found no array with errorbars of observed intensities. "
"Do not forget to assign them later!")
self.error = None
self.global_error = None
self.hasErrors = False
# sets that the spectrum is loaded
if (wave is not None) and (intens is not None):
self.loaded = True
self.read_size()
# check lengths of intens and wave
self.check_length()
# set the error
if isinstance(error, (float, int)) and error is not None:
self.error = np.ones(len(wave)) * error
self.hasErrors = True
self.global_error = error
elif error is not None:
self.error = error
self.hasErrors = True
self.global_error = None
else:
self.loaded = False
# if we provided the filename
self.filename = filename
if (not self.loaded) and (self.filename is not None):
self.read_spectrum_from_file(filename, global_error=error)
elif (not self.loaded) and (self.filename is None):
warnings.warn('No spectrum was loaded. This class is kinda useless without a spectrum. '
'I hope you know what you are doing.')
        # assigns component
self.component = component
# setup korel and check that it is proper
self.korel = korel
self.check_korel()
# setup the group
self.group = dict()
if group is not None:
self.set_group(group)
# assigns the projected slit width
self.instrumental_width = instrumental_width
# setup debug mode
self.debug = debug
# if there is hjd passed, it is assigned to the spectrum
self.hjd = kwargs.get('hjd', None)
def __str__(self):
"""
String representation of the class.
"""
string = ''
for var in ['filename', 'component', 'korel', 'loaded', 'hasErrors', 'global_error', 'group', 'hjd']:
string += "%s: %s " % (var, str(getattr(self, var)))
if self.loaded:
string += "%s: %s " % ('(min, max)', str(self.get_boundaries()))
string += '\n'
return string
def check_korel(self):
"""
If korel is set, component must be set too.
"""
if self.korel and str(self.component).lower() == 'all':
raise ValueError('In the korel regime, each spectrum must be assigned component! '
'Currently it is set to %s.' % str(self.component))
def check_length(self):
"""
Checks that wavelengths and intensities have the same length.
"""
if len(self.wave) != len(self.intens):
raise ValueError('Wavelength vector and intensity vector do not have the same length!')
def check_loaded(self):
"""
Checks that spectrum is loaded.
"""
if not self.loaded:
raise ValueError('The spectrum is not loaded.')
def free_spectrum(self):
"""
Deletes the stored spectrum.
"""
self.wave = None
self.intens = None
self.error = None
self.loaded = False
self.hasErrors = False
def get_boundaries(self):
"""
Returns the minimal and the maximal wavelength
of the spectrum.
"""
self.read_size()
return self.wmin, self.wmax
def get_group(self, param):
"""
Get defined groups for a given parameter.
:param param: the parameter
:return: returns all groups assigned to a parameter
"""
if param.lower() in self.group:
return self.group[param]
else:
return None
def get_instrumental_width(self):
"""
Returns width of the instrumental profile.
:return:
"""
return self.instrumental_width
def get_sigma_from_continuum(self, cmin, cmax, store=True):
"""
Estimates the error of the flux from the scatter in
continuum.
:param cmin the minimal continuum value
:param cmax the maximal continuum value
:param store save the found error as an error
:return stddev the standard deviation
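        Example (the wavelength limits are placeholders):
            stddev = obs.get_sigma_from_continuum(6620., 6630.)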
"""
# is the spectrum loaded ?
self.check_loaded()
        # get the part around the continuum
intens = self.get_spectrum(wmin=cmin, wmax=cmax)[1]
# get the scatter
stddev = intens.std(ddof=1)
# save it as an error
if store:
self.global_error = stddev
self.error = stddev * np.ones(len(self.wave))
return stddev
def get_sigma_from_fft(self, nlast=20, store=True):
"""
Estimates the noise using the FFT.
        :param nlast length of the FFT spectrum tail used to estimate the scatter
:param store should we save the standard deviation
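        Example (illustrative): stddev = obs.get_sigma_from_fft(nlast=20)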
"""
# check that everything is loaded
self.check_loaded()
self.read_size()
# get the linear scale
lin_wave = np.linspace(self.wmin, self.wmax, self.npixel)
# interpolate to linear scale
tck = splrep(self.wave, self.intens)
lin_intens = splev(lin_wave, tck)
# perform the FFT and shift it
fft_intens = np.fft.fft(lin_intens)
# get absolute values
abs_fft_intens = np.absolute(fft_intens)
# get the high frequency tail
abs_fft_intens = abs_fft_intens[len(abs_fft_intens) / 2 - nlast + 1:len(abs_fft_intens) / 2 + nlast]
# estimate the error
stddev = abs_fft_intens.std() * abs_fft_intens.mean()
        # store the value as an error if needed
if store:
self.error = stddev * np.ones(len(self.wave))
self.global_error = stddev
return stddev
def get_spectrum(self, wmin=None, wmax=None):
"""
Returns the spectrum with wavelengths wmin -> wmax
:param wmin minimal wavelength
:param wmax maximal wavelength
        :return wave, intens, error (optional) - the observed spectrum,
wavelength, intensity and error (if it is given)
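        Example (limits are placeholders; the error vector is returned only
        when errors were provided):
            wave, intens, error = obs.get_spectrum(wmin=6500., wmax=6600.)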
"""
if not self.loaded:
raise Exception('The spectrum %s has not been loaded yet!' % str(self))
else:
# the whole spectrum
if wmin is None and wmax is None:
if self.error is not None:
return self.wave.copy(), self.intens.copy(), self.error.copy()
else:
return self.wave.copy(), self.intens.copy()
else:
# corrects boundaries if needed
if wmin is None:
wmin = self.wmin
if wmax is None:
wmax = self.wmax
                # What if we query too long a spectrum
if (wmin-self.wmin) < -1e-6 or (wmax - self.wmax) > 1e-6:
                    raise ValueError("Queried spectral bounds (%f %f) lie outside "
"observed spectrum bounds (%f %f)." %
(wmin, wmax, self.wmin, self.wmax))
# selects the spectrum part
ind = np.where((self.wave >= wmin) & (self.wave <= wmax))[0]
if self.error is not None:
return self.wave[ind].copy(), self.intens[ind].copy(), self.error[ind].copy()
else:
return self.wave[ind].copy(), self.intens[ind].copy()
def get_wavelength(self):
"""
Returns the wavelength vector.
        OUTPUT:
self.wave.. wavelengths
"""
if not self.loaded:
raise Exception('The spectrum %s has not been loaded yet!' % str(self))
else:
return self.wave.copy()
def plot(self, ax=None, savefig=False, figname=None, **kwargs):
"""
:param figname
:param savefig
:param ax: AxesSubplot
:param kwargs:
:return:
"""
w = self.wave
i = self.intens
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
props = str({'filename': self.filename, 'component': self.component, 'korel': self.korel})
ax.plot(w, i, label=props, **kwargs)
ax.set_xlim(self.wmin, self.wmax)
ax.set_ylim(0.95*i.min(), 1.05*i.max())
ax.set_xlabel('$\lambda(\AA)$')
ax.set_ylabel('$F_{\lambda}$(rel.)')
ax.legend(fontsize=10)
# save the figure
if savefig:
if figname is None:
figname = self.filename + '.png'
# save the plot
plt.savefig(figname)
def read_size(self):
"""
        Gets the minimal wavelength, maximal wavelength
and the mean step. Linearity in wavelength is not
required.
"""
if not self.loaded:
raise Exception('The spectrum %s has not been loaded yet!' % str(self))
self.wmin = self.wave.min()
self.wmax = self.wave.max()
self.npixel = len(self.wave)
self.step = np.mean(self.wave[1:] - self.wave[:-1])
def read_spectrum_from_file(self, filename, global_error=None):
"""
Reads the spectrum from a file. Following format
is assumed: %f %f %f (wavelength, intensity, error).
If user does not provide errors, we still attempt
        to load the spectrum.
:param filename spectrum source file
:param global_error the error applicable to the spectrum
:return None
"""
# just in case we have already set up the global error
if global_error is None and self.global_error is not None:
global_error = self.global_error
try:
# first we try to load 3 columns, i.e with errors
self.wave, self.intens, self.error = np.loadtxt(filename, unpack=True, usecols=[0, 1, 2])
self.hasErrors = True
except:
# we failed, so we attempt to load two columns
self.wave, self.intens = np.loadtxt(filename, unpack=True, usecols=[0, 1])
# error was not set up
if global_error is None:
warnings.warn("I found no errorbars of the observed intensities in file: %s! "
"I assume they will be provided later. I remember!!" % filename)
self.hasErrors = False
self.global_error = None
# error was set up
else:
self.error = global_error * np.ones(len(self.wave))
self.hasErrors = True
self.global_error = global_error
# the spectrum is marked as loaded
self.loaded = True
# the spectrum is checked
self.check_length()
self.read_size()
def reload_spectrum(self):
"""
Reloads the spectrum.
:return:
"""
if self.loaded is False:
warnings.warn('The spectrum was not loaded, so I am not reloading, but loading... just FYI.')
if self.filename is None:
raise ValueError('There has been no spectrum given for %s' % (str(self)))
self.read_spectrum_from_file(self.filename)
def select_random_subset(self, frac):
"""
        :param frac: spectrum fraction 0.0-1.0
:return:
"""
if not self.loaded:
            raise AttributeError('Cannot select a subset. '
'The spectrum %s has not been loaded yet.' % (str(self)))
# set the newlength
newlength = int(np.ceil(frac*self.npixel))
if newlength >= self.npixel:
return
# surviving spectra indices
inds = np.sort(np.random.randint(self.npixel, size=newlength))
        # adjust the spectra
self.wave = self.wave[inds]
self.intens = self.intens[inds]
if self.error is not None:
self.error = self.error[inds]
# measure the spectrum
self.read_size()
def set_error(self, vec_error=None, global_error=None):
"""
Sets error to the spectrum..either local or global.
:param vec_error vector error len(vec_error) = len(spectrum)
:param global_error int float error applied to the whole spectrum
"""
if vec_error is not None:
self.error = vec_error
            if len(vec_error) != self.npixel:
                raise ValueError('The length of the error vector and the length of the spectrum do not match (%s, %s)'
                                 % (len(vec_error), str(self.npixel)))
self.hasErrors = True
self.global_error = None
if global_error is not None:
self.error = global_error * np.ones(len(self.wave))
self.hasErrors = True
self.global_error = global_error
def set_group(self, group):
"""
Sets a group to the spectrum
:param group a dictionary of pairs parameter + group
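        Example (illustrative): obs.set_group(dict(rv=1))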
"""
# print group
for key in group.keys():
self.group[key.lower()] = group[key]
def set_spectrum_from_arrays(self, wave, intens, error):
"""
Stores the spectrum from arrays. It is assumed
that user also provides error vector.
:param wave wavelength vector
:param intens intensity vector
        :param error error vector
"""
self.wave = wave
self.intens = intens
self.error = error
self.loaded = True
self.hasErrors = True
# checking and reading
self.check_length()
self.read_size()
| 15,903 | 33.498915 | 118 | py |
pyterpol | pyterpol-master/observed/__init__.py | 2 | 0 | 0 | py |
|
davis2017-evaluation | davis2017-evaluation-master/setup.py | from setuptools import setup
import sys
if sys.version_info < (3, 6):
sys.exit('Sorry, only Python >= 3.6 is supported')
setup(
python_requires='>=3.6, <4',
install_requires=[
'Pillow>=4.1.1',
'networkx>=2.0',
'numpy>=1.12.1',
'opencv-python>=4.0.0.21',
'pandas>=0.21.1',
'pathlib2;python_version<"3.5"',
'scikit-image>=0.13.1',
'scikit-learn>=0.18',
'scipy>=1.0.0',
'tqdm>=4.28.1'
    ],
    packages=['davis2017'])
| 506 | 23.142857 | 54 | py |
davis2017-evaluation | davis2017-evaluation-master/evaluation_method.py | #!/usr/bin/env python
import os
import sys
from time import time
import argparse
import numpy as np
import pandas as pd
from davis2017.evaluation import DAVISEvaluation
default_davis_path = '/path/to/the/folder/DAVIS'
time_start = time()
parser = argparse.ArgumentParser()
parser.add_argument('--davis_path', type=str, help='Path to the DAVIS folder containing the JPEGImages, Annotations, '
'ImageSets, Annotations_unsupervised folders',
required=False, default=default_davis_path)
parser.add_argument('--set', type=str, help='Subset to evaluate the results', default='val')
parser.add_argument('--task', type=str, help='Task to evaluate the results', default='unsupervised',
choices=['semi-supervised', 'unsupervised'])
parser.add_argument('--results_path', type=str, help='Path to the folder containing the sequences folders',
required=True)
args, _ = parser.parse_known_args()
csv_name_global = f'global_results-{args.set}.csv'
csv_name_per_sequence = f'per-sequence_results-{args.set}.csv'
# Check if the method has been evaluated before, if so read the results, otherwise compute the results
csv_name_global_path = os.path.join(args.results_path, csv_name_global)
csv_name_per_sequence_path = os.path.join(args.results_path, csv_name_per_sequence)
if os.path.exists(csv_name_global_path) and os.path.exists(csv_name_per_sequence_path):
print('Using precomputed results...')
table_g = pd.read_csv(csv_name_global_path)
table_seq = pd.read_csv(csv_name_per_sequence_path)
else:
print(f'Evaluating sequences for the {args.task} task...')
# Create dataset and evaluate
dataset_eval = DAVISEvaluation(davis_root=args.davis_path, task=args.task, gt_set=args.set)
metrics_res = dataset_eval.evaluate(args.results_path)
J, F = metrics_res['J'], metrics_res['F']
# Generate dataframe for the general results
g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay']
final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2.
g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]),
np.mean(F["D"])])
g_res = np.reshape(g_res, [1, len(g_res)])
table_g = pd.DataFrame(data=g_res, columns=g_measures)
with open(csv_name_global_path, 'w') as f:
table_g.to_csv(f, index=False, float_format="%.3f")
print(f'Global results saved in {csv_name_global_path}')
# Generate a dataframe for the per sequence results
seq_names = list(J['M_per_object'].keys())
seq_measures = ['Sequence', 'J-Mean', 'F-Mean']
J_per_object = [J['M_per_object'][x] for x in seq_names]
F_per_object = [F['M_per_object'][x] for x in seq_names]
table_seq = pd.DataFrame(data=list(zip(seq_names, J_per_object, F_per_object)), columns=seq_measures)
with open(csv_name_per_sequence_path, 'w') as f:
table_seq.to_csv(f, index=False, float_format="%.3f")
print(f'Per-sequence results saved in {csv_name_per_sequence_path}')
# Print the results
sys.stdout.write(f"--------------------------- Global results for {args.set} ---------------------------\n")
print(table_g.to_string(index=False))
sys.stdout.write(f"\n---------- Per sequence results for {args.set} ----------\n")
print(table_seq.to_string(index=False))
total_time = time() - time_start
sys.stdout.write('\nTotal time:' + str(total_time))
| 3,492 | 49.623188 | 118 | py |
davis2017-evaluation | davis2017-evaluation-master/evaluation_codalab.py | #!/usr/bin/env python
import sys
import os.path
from time import time
import numpy as np
import pandas
from davis2017.evaluation import DAVISEvaluation
task = 'semi-supervised'
gt_set = 'test-dev'
time_start = time()
# as per the metadata file, input and output directories are the arguments
if len(sys.argv) < 3:
input_dir = "input_dir"
output_dir = "output_dir"
debug = True
else:
[_, input_dir, output_dir] = sys.argv
debug = False
# unzipped submission data is always in the 'res' subdirectory
# https://github.com/codalab/codalab-competitions/wiki/User_Building-a-Scoring-Program-for-a-Competition#directory-structure-for-submissions
submission_path = os.path.join(input_dir, 'res')
if not os.path.exists(submission_path):
sys.exit('Could not find submission file {0}'.format(submission_path))
# unzipped reference data is always in the 'ref' subdirectory
# https://github.com/codalab/codalab-competitions/wiki/User_Building-a-Scoring-Program-for-a-Competition#directory-structure-for-submissions
gt_path = os.path.join(input_dir, 'ref')
if not os.path.exists(gt_path):
sys.exit('Could not find GT file {0}'.format(gt_path))
# Create dataset
dataset_eval = DAVISEvaluation(davis_root=gt_path, gt_set=gt_set, task=task, codalab=True)
# Check directory structure
res_subfolders = os.listdir(submission_path)
if len(res_subfolders) == 1:
sys.stdout.write(
"Incorrect folder structure, the folders of the sequences have to be placed directly inside the "
"zip.\nInside every folder of the sequences there must be an indexed PNG file for every frame.\n"
"The indexes have to match with the initial frame.\n")
sys.exit()
# Check that all sequences are there
missing = False
for seq in dataset_eval.dataset.get_sequences():
if seq not in res_subfolders:
sys.stdout.write(seq + " sequence is missing.\n")
missing = True
if missing:
sys.stdout.write(
"Verify also the folder structure, the folders of the sequences have to be placed directly inside "
"the zip.\nInside every folder of the sequences there must be an indexed PNG file for every frame.\n"
"The indexes have to match with the initial frame.\n")
sys.exit()
metrics_res = dataset_eval.evaluate(submission_path, debug=debug)
J, F = metrics_res['J'], metrics_res['F']
# Generate output to the stdout
seq_names = list(J['M_per_object'].keys())
if gt_set == "val" or gt_set == "train" or gt_set == "test-dev":
sys.stdout.write("----------------Global results in CSV---------------\n")
g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay']
final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2.
g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]),
np.mean(F["D"])])
table_g = pandas.DataFrame(data=np.reshape(g_res, [1, len(g_res)]), columns=g_measures)
table_g.to_csv(sys.stdout, index=False, float_format="%0.3f")
sys.stdout.write("\n\n------------Per sequence results in CSV-------------\n")
seq_measures = ['Sequence', 'J-Mean', 'F-Mean']
J_per_object = [J['M_per_object'][x] for x in seq_names]
F_per_object = [F['M_per_object'][x] for x in seq_names]
table_seq = pandas.DataFrame(data=list(zip(seq_names, J_per_object, F_per_object)), columns=seq_measures)
table_seq.to_csv(sys.stdout, index=False, float_format="%0.3f")
# Write scores to a file named "scores.txt"
with open(os.path.join(output_dir, 'scores.txt'), 'w') as output_file:
final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2.
output_file.write("GlobalMean: %f\n" % final_mean)
output_file.write("JMean: %f\n" % np.mean(J["M"]))
output_file.write("JRecall: %f\n" % np.mean(J["R"]))
output_file.write("JDecay: %f\n" % np.mean(J["D"]))
output_file.write("FMean: %f\n" % np.mean(F["M"]))
output_file.write("FRecall: %f\n" % np.mean(F["R"]))
output_file.write("FDecay: %f\n" % np.mean(F["D"]))
total_time = time() - time_start
sys.stdout.write('\nTotal time:' + str(total_time))
| 4,122 | 42.861702 | 140 | py |
davis2017-evaluation | davis2017-evaluation-master/pytest/test_evaluation.py | import os
import sys
import numpy as np
import pandas
from time import time
from collections import defaultdict
from davis2017.evaluation import DAVISEvaluation
from davis2017 import utils
from davis2017.metrics import db_eval_boundary, db_eval_iou
davis_root = 'input_dir/ref'
methods_root = 'examples'
def test_task(task, gt_set, res_path, J_target=None, F_target=None, metric=('J', 'F')):
dataset_eval = DAVISEvaluation(davis_root=davis_root, gt_set=gt_set, task=task, codalab=True)
metrics_res = dataset_eval.evaluate(res_path, debug=False, metric=metric)
num_seq = len(list(dataset_eval.dataset.get_sequences()))
J = metrics_res['J'] if 'J' in metric else {'M': np.zeros(num_seq), 'R': np.zeros(num_seq), 'D': np.zeros(num_seq)}
F = metrics_res['F'] if 'F' in metric else {'M': np.zeros(num_seq), 'R': np.zeros(num_seq), 'D': np.zeros(num_seq)}
if gt_set == "val" or gt_set == "train" or gt_set == "test-dev":
sys.stdout.write("----------------Global results in CSV---------------\n")
g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay']
final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2. if 'J' in metric and 'F' in metric else 0
g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]), np.mean(F["D"])])
table_g = pandas.DataFrame(data=np.reshape(g_res, [1, len(g_res)]), columns=g_measures)
table_g.to_csv(sys.stdout, index=False, float_format="%0.3f")
if J_target is not None:
assert check_results_similarity(J, J_target), f'J {print_error(J, J_target)}'
if F_target is not None:
assert check_results_similarity(F, F_target), f'F {print_error(F, F_target)}'
return J, F
def check_results_similarity(target, result):
return np.isclose(np.mean(target['M']) - result[0], 0, atol=0.001) & \
np.isclose(np.mean(target['R']) - result[1], 0, atol=0.001) & \
np.isclose(np.mean(target['D']) - result[2], 0, atol=0.001)
def print_error(target, result):
return f'M:{np.mean(target["M"])} = {result[0]}\t' + \
f'R:{np.mean(target["R"])} = {result[1]}\t' + \
f'D:{np.mean(target["D"])} = {result[2]}'
def test_semisupervised_premvos():
method_path = os.path.join(methods_root, 'premvos')
print('Evaluating PREMVOS val')
J_val = [0.739, 0.831, 0.162]
F_val = [0.818, 0.889, 0.195]
test_task('semi-supervised', 'val', method_path, J_val, F_val)
print('Evaluating PREMVOS test-dev')
J_test_dev = [0.675, 0.768, 0.217]
F_test_dev = [0.758, 0.843, 0.206]
test_task('semi-supervised', 'test-dev', method_path, J_test_dev, F_test_dev)
print('\n')
def test_semisupervised_onavos():
method_path = os.path.join(methods_root, 'onavos')
print('Evaluating OnAVOS val')
J_val = [0.616, 0.674, 0.279]
F_val = [0.691, 0.754, 0.266]
test_task('semi-supervised', 'val', method_path, J_val, F_val)
print('Evaluating OnAVOS test-dev')
J_test_dev = [0.499, 0.543, 0.230]
F_test_dev = [0.557, 0.603, 0.234]
test_task('semi-supervised', 'test-dev', method_path, J_test_dev, F_test_dev)
print('\n')
def test_semisupervised_osvos():
method_path = os.path.join(methods_root, 'osvos')
print('Evaluating OSVOS val')
J_val = [0.566, 0.638, 0.261]
F_val = [0.639, 0.738, 0.270]
test_task('semi-supervised', 'val', method_path, J_val, F_val)
print('Evaluating OSVOS test-dev')
J_test_dev = [0.470, 0.521, 0.192]
F_test_dev = [0.548, 0.597, 0.198]
test_task('semi-supervised', 'test-dev', method_path, J_test_dev, F_test_dev)
print('\n')
def test_unsupervised_flip_gt():
print('Evaluating Unsupervised Permute GT')
method_path = os.path.join(methods_root, 'swap_gt')
if not os.path.isdir(method_path):
utils.generate_random_permutation_gt_obj_proposals(davis_root, 'val', method_path)
# utils.generate_random_permutation_gt_obj_proposals('test-dev', method_path)
J_val = [1, 1, 0]
F_val= [1, 1, 0]
test_task('unsupervised', 'val', method_path, J_val, F_val)
# test_task('unsupervised', 'test-dev', method_path, J_val, F_val)
def test_unsupervised_rvos():
print('Evaluating RVOS')
method_path = os.path.join(methods_root, 'rvos')
test_task('unsupervised', 'val', method_path)
# test_task('unsupervised', 'test-dev', method_path)
def test_unsupervsied_multiple_proposals(num_proposals=20, metric=('J', 'F')):
print('Evaluating Multiple Proposals')
method_path = os.path.join(methods_root, f'generated_proposals_{num_proposals}')
utils.generate_obj_proposals(davis_root, 'val', num_proposals, method_path)
# utils.generate_obj_proposals('test-dev', num_proposals, method_path)
test_task('unsupervised', 'val', method_path, metric=metric)
# test_task('unsupervised', 'test-dev', method_path, metric=metric)
def test_void_masks():
gt = np.zeros((2, 200, 200))
mask = np.zeros((2, 200, 200))
void = np.zeros((2, 200, 200))
gt[:, 100:150, 100:150] = 1
void[:, 50:100, 100:150] = 1
mask[:, 50:150, 100:150] = 1
assert np.mean(db_eval_iou(gt, mask, void)) == 1
assert np.mean(db_eval_boundary(gt, mask, void)) == 1
def benchmark_number_proposals():
number_proposals = [10, 15, 20, 30]
timing_results = defaultdict(dict)
for n in number_proposals:
time_start = time()
test_unsupervsied_multiple_proposals(n, 'J')
timing_results['J'][n] = time() - time_start
for n in number_proposals:
time_start = time()
test_unsupervsied_multiple_proposals(n)
timing_results['J_F'][n] = time() - time_start
print(f'Using J {timing_results["J"]}')
print(f'Using J&F {timing_results["J_F"]}')
# Using J {10: 156.45335865020752, 15: 217.91797709465027, 20: 282.0747673511505, 30: 427.6770250797272}
# Using J & F {10: 574.3529748916626, 15: 849.7542386054993, 20: 1123.4619634151459, 30: 1663.6704666614532}
# Codalab
# Using J & F {10: 971.196366071701, 15: 1473.9757001399994, 20: 1918.787559747696, 30: 3007.116141319275}
if __name__ == '__main__':
# Test void masks
test_void_masks()
# Test semi-supervised methods
test_semisupervised_premvos()
test_semisupervised_onavos()
test_semisupervised_osvos()
# Test unsupervised methods
test_unsupervised_flip_gt()
# test_unsupervised_rvos()
test_unsupervsied_multiple_proposals()
| 6,518 | 38.271084 | 140 | py |
davis2017-evaluation | davis2017-evaluation-master/davis2017/utils.py | import os
import errno
import numpy as np
from PIL import Image
import warnings
from davis2017.davis import DAVIS
def _pascal_color_map(N=256, normalized=False):
"""
Python implementation of the color map function for the PASCAL VOC data set.
Official Matlab version can be found in the PASCAL VOC devkit
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
"""
def bitget(byteval, idx):
return (byteval & (1 << idx)) != 0
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7 - j)
g = g | (bitget(c, 1) << 7 - j)
b = b | (bitget(c, 2) << 7 - j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap / 255 if normalized else cmap
return cmap
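# Illustrative check (not part of the original module): the PASCAL color map
# assigns black to label 0 and the usual maroon to label 1, e.g.
#     cmap = _pascal_color_map()
#     tuple(cmap[0]) == (0, 0, 0) and tuple(cmap[1]) == (128, 0, 0)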
def overlay_semantic_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):
    im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=int)
if im.shape[:-1] != ann.shape:
raise ValueError('First two dimensions of `im` and `ann` must match')
if im.shape[-1] != 3:
        raise ValueError('im must have three channels in the third dimension')
colors = colors or _pascal_color_map()
colors = np.asarray(colors, dtype=np.uint8)
mask = colors[ann]
fg = im * alpha + (1 - alpha) * mask
img = im.copy()
img[ann > 0] = fg[ann > 0]
if contour_thickness: # pragma: no cover
import cv2
for obj_id in np.unique(ann[ann > 0]):
contours = cv2.findContours((ann == obj_id).astype(
np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),
contour_thickness)
return img
def generate_obj_proposals(davis_root, subset, num_proposals, save_path):
dataset = DAVIS(davis_root, subset=subset, codalab=True)
for seq in dataset.get_sequences():
save_dir = os.path.join(save_path, seq)
if os.path.exists(save_dir):
continue
all_gt_masks, all_masks_id = dataset.get_all_masks(seq, True)
img_size = all_gt_masks.shape[2:]
num_rows = int(np.ceil(np.sqrt(num_proposals)))
proposals = np.zeros((num_proposals, len(all_masks_id), *img_size))
height_slices = np.floor(np.arange(0, img_size[0] + 1, img_size[0]/num_rows)).astype(np.uint).tolist()
width_slices = np.floor(np.arange(0, img_size[1] + 1, img_size[1]/num_rows)).astype(np.uint).tolist()
ii = 0
prev_h, prev_w = 0, 0
for h in height_slices[1:]:
for w in width_slices[1:]:
proposals[ii, :, prev_h:h, prev_w:w] = 1
prev_w = w
ii += 1
if ii == num_proposals:
break
prev_h, prev_w = h, 0
if ii == num_proposals:
break
os.makedirs(save_dir, exist_ok=True)
for i, mask_id in enumerate(all_masks_id):
mask = np.sum(proposals[:, i, ...] * np.arange(1, proposals.shape[0] + 1)[:, None, None], axis=0)
save_mask(mask, os.path.join(save_dir, f'{mask_id}.png'))
def generate_random_permutation_gt_obj_proposals(davis_root, subset, save_path):
dataset = DAVIS(davis_root, subset=subset, codalab=True)
for seq in dataset.get_sequences():
gt_masks, all_masks_id = dataset.get_all_masks(seq, True)
obj_swap = np.random.permutation(np.arange(gt_masks.shape[0]))
gt_masks = gt_masks[obj_swap, ...]
save_dir = os.path.join(save_path, seq)
os.makedirs(save_dir, exist_ok=True)
for i, mask_id in enumerate(all_masks_id):
mask = np.sum(gt_masks[:, i, ...] * np.arange(1, gt_masks.shape[0] + 1)[:, None, None], axis=0)
save_mask(mask, os.path.join(save_dir, f'{mask_id}.png'))
def color_map(N=256, normalized=False):
def bitget(byteval, idx):
return ((byteval & (1 << idx)) != 0)
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7-j)
g = g | (bitget(c, 1) << 7-j)
b = b | (bitget(c, 2) << 7-j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap/255 if normalized else cmap
return cmap
def save_mask(mask, img_path):
if np.max(mask) > 255:
raise ValueError('Maximum id pixel value is 255')
mask_img = Image.fromarray(mask.astype(np.uint8))
mask_img.putpalette(color_map().flatten().tolist())
mask_img.save(img_path)
def db_statistics(per_frame_values):
""" Compute mean,recall and decay from per-frame evaluation.
Arguments:
per_frame_values (ndarray): per-frame evaluation
Returns:
M,O,D (float,float,float):
return evaluation statistics: mean,recall,decay.
"""
# strip off nan values
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
M = np.nanmean(per_frame_values)
O = np.nanmean(per_frame_values > 0.5)
N_bins = 4
ids = np.round(np.linspace(1, len(per_frame_values), N_bins + 1) + 1e-10) - 1
ids = ids.astype(np.uint8)
D_bins = [per_frame_values[ids[i]:ids[i + 1] + 1] for i in range(0, 4)]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
D = np.nanmean(D_bins[0]) - np.nanmean(D_bins[3])
return M, O, D
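# Worked example (illustrative, not part of the original module): for
# per_frame_values = np.array([0.9, 0.8, 0.4, 0.3]), db_statistics returns
# approximately M = 0.6 (mean), O = 0.5 (two of the four frames are above 0.5)
# and D = 0.5 (mean of the first bin, 0.85, minus mean of the last bin, 0.35).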
def list_files(dir, extension=".png"):
return [os.path.splitext(file_)[0] for file_ in os.listdir(dir) if file_.endswith(extension)]
def force_symlink(file1, file2):
try:
os.symlink(file1, file2)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(file2)
os.symlink(file1, file2)
| 6,009 | 33.342857 | 110 | py |
davis2017-evaluation | davis2017-evaluation-master/davis2017/results.py | import os
import numpy as np
from PIL import Image
import sys
class Results(object):
def __init__(self, root_dir):
self.root_dir = root_dir
def _read_mask(self, sequence, frame_id):
try:
mask_path = os.path.join(self.root_dir, sequence, f'{frame_id}.png')
return np.array(Image.open(mask_path))
except IOError as err:
sys.stdout.write(sequence + " frame %s not found!\n" % frame_id)
sys.stdout.write("The frames have to be indexed PNG files placed inside the corespondent sequence "
"folder.\nThe indexes have to match with the initial frame.\n")
sys.stderr.write("IOError: " + err.strerror + "\n")
sys.exit()
def read_masks(self, sequence, masks_id):
mask_0 = self._read_mask(sequence, masks_id[0])
masks = np.zeros((len(masks_id), *mask_0.shape))
for ii, m in enumerate(masks_id):
masks[ii, ...] = self._read_mask(sequence, m)
num_objects = int(np.max(masks))
tmp = np.ones((num_objects, *masks.shape))
tmp = tmp * np.arange(1, num_objects + 1)[:, None, None, None]
masks = (tmp == masks[None, ...]) > 0
return masks
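# Illustrative sketch (added example, not part of the original module): mirrors
# the broadcasting trick used in Results.read_masks to split an indexed mask
# into one boolean mask per object id.
def _demo_split_indexed_mask():
    masks = np.array([[[0, 1],
                       [2, 2]]], dtype=np.float64)  # 1 frame, object ids 1 and 2
    num_objects = int(np.max(masks))
    tmp = np.ones((num_objects, *masks.shape))
    tmp = tmp * np.arange(1, num_objects + 1)[:, None, None, None]
    per_object = (tmp == masks[None, ...]) > 0
    # per_object[0] is True where the mask equals 1, per_object[1] where it equals 2
    return per_object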
| 1,236 | 37.65625 | 111 | py |
davis2017-evaluation | davis2017-evaluation-master/davis2017/metrics.py | import math
import numpy as np
import cv2
def db_eval_iou(annotation, segmentation, void_pixels=None):
""" Compute region similarity as the Jaccard Index.
Arguments:
annotation (ndarray): binary annotation map.
segmentation (ndarray): binary segmentation map.
void_pixels (ndarray): optional mask with void pixels
Return:
jaccard (float): region similarity
"""
assert annotation.shape == segmentation.shape, \
f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.'
annotation = annotation.astype(bool)
segmentation = segmentation.astype(bool)
if void_pixels is not None:
assert annotation.shape == void_pixels.shape, \
f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.'
void_pixels = void_pixels.astype(bool)
else:
void_pixels = np.zeros_like(segmentation)
# Intersection between all sets
inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1))
union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1))
j = inters / union
if j.ndim == 0:
j = 1 if np.isclose(union, 0) else j
else:
j[np.isclose(union, 0)] = 1
return j
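# Illustrative sketch (added example, not part of the original module): a tiny
# self-contained check of db_eval_iou on toy masks.
def _demo_db_eval_iou():
    """Two 2x2 squares that overlap on a 2x1 strip have a Jaccard index of 2/6 = 1/3."""
    annotation = np.zeros((4, 4))
    segmentation = np.zeros((4, 4))
    annotation[0:2, 0:2] = 1
    segmentation[0:2, 1:3] = 1
    return db_eval_iou(annotation, segmentation)  # == 1/3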
def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008):
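    """ Compute the boundary F-measure, frame by frame if the inputs are 3D stacks.
    Arguments:
        annotation (ndarray): binary annotation map, (H, W) or (T, H, W).
        segmentation (ndarray): binary segmentation map, same shape.
        void_pixels (ndarray): optional mask with void pixels
        bound_th (float): boundary tolerance; values below 1 are interpreted
            as a fraction of the image diagonal.
    Return:
        F (float or ndarray): boundary F-measure (per frame for 3D inputs).
    """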
assert annotation.shape == segmentation.shape
if void_pixels is not None:
assert annotation.shape == void_pixels.shape
if annotation.ndim == 3:
n_frames = annotation.shape[0]
f_res = np.zeros(n_frames)
for frame_id in range(n_frames):
void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ]
f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th)
elif annotation.ndim == 2:
f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th)
else:
raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions')
return f_res
def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008):
"""
    Compute the boundary F-measure between a foreground mask and a ground-truth mask.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
void_pixels (ndarray): optional mask with void pixels
Returns:
F (float): boundaries F-measure
"""
assert np.atleast_3d(foreground_mask).shape[2] == 1
if void_pixels is not None:
void_pixels = void_pixels.astype(bool)
else:
void_pixels = np.zeros_like(foreground_mask).astype(bool)
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
from skimage.morphology import disk
# fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F
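# Illustrative sketch (added example, not part of the original module): identical
# foreground and ground-truth masks give a boundary F-measure of 1.
def _demo_f_measure_identical_masks():
    mask = np.zeros((32, 32))
    mask[8:24, 8:24] = 1
    return f_measure(mask, mask)  # precision == recall == 1, so F == 1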
def _seg2bmap(seg, width=None, height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <[email protected]>
January 2003
"""
seg = seg.astype(bool)
seg[seg > 0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h, w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
    assert not (
        width > w or height > h or abs(ar1 - ar2) > 0.01
    ), "Can't convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:, :-1] = seg[:, 1:]
s[:-1, :] = seg[1:, :]
se[:-1, :-1] = seg[1:, 1:]
b = seg ^ e | seg ^ s | seg ^ se
b[-1, :] = seg[-1, :] ^ e[-1, :]
b[:, -1] = seg[:, -1] ^ s[:, -1]
b[-1, -1] = 0
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height, width))
for x in range(w):
for y in range(h):
if b[y, x]:
                    j = 1 + math.floor((y - 1) * height / h)
                    i = 1 + math.floor((x - 1) * width / w)
bmap[j, i] = 1
return bmap
if __name__ == '__main__':
from davis2017.davis import DAVIS
from davis2017.results import Results
dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics')
results = Results(root_dir='examples/osvos')
# Test timing F measure
for seq in dataset.get_sequences():
all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
all_res_masks = results.read_masks(seq, all_masks_id)
f_metrics_res = np.zeros(all_gt_masks.shape[:2])
for ii in range(all_gt_masks.shape[0]):
f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...])
# Run using to profile code: python -m cProfile -o f_measure.prof metrics.py
# snakeviz f_measure.prof
| 6,823 | 33.464646 | 137 | py |
davis2017-evaluation | davis2017-evaluation-master/davis2017/__init__.py | from __future__ import absolute_import
__version__ = '0.1.0'
| 62 | 14.75 | 38 | py |
davis2017-evaluation | davis2017-evaluation-master/davis2017/evaluation.py | import sys
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
import numpy as np
from davis2017.davis import DAVIS
from davis2017.metrics import db_eval_boundary, db_eval_iou
from davis2017 import utils
from davis2017.results import Results
from scipy.optimize import linear_sum_assignment
class DAVISEvaluation(object):
def __init__(self, davis_root, task, gt_set, sequences='all', codalab=False):
"""
Class to evaluate DAVIS sequences from a certain set and for a certain task
:param davis_root: Path to the DAVIS folder that contains JPEGImages, Annotations, etc. folders.
        :param task: Task to compute the evaluation, choose between semi-supervised or unsupervised.
:param gt_set: Set to compute the evaluation
:param sequences: Sequences to consider for the evaluation, 'all' to use all the sequences in a set.
"""
self.davis_root = davis_root
self.task = task
self.dataset = DAVIS(root=davis_root, task=task, subset=gt_set, sequences=sequences, codalab=codalab)
@staticmethod
def _evaluate_semisupervised(all_gt_masks, all_res_masks, all_void_masks, metric):
if all_res_masks.shape[0] > all_gt_masks.shape[0]:
sys.stdout.write("\nIn your PNG files there is an index higher than the number of objects in the sequence!")
sys.exit()
elif all_res_masks.shape[0] < all_gt_masks.shape[0]:
zero_padding = np.zeros((all_gt_masks.shape[0] - all_res_masks.shape[0], *all_res_masks.shape[1:]))
all_res_masks = np.concatenate([all_res_masks, zero_padding], axis=0)
j_metrics_res, f_metrics_res = np.zeros(all_gt_masks.shape[:2]), np.zeros(all_gt_masks.shape[:2])
for ii in range(all_gt_masks.shape[0]):
if 'J' in metric:
j_metrics_res[ii, :] = db_eval_iou(all_gt_masks[ii, ...], all_res_masks[ii, ...], all_void_masks)
if 'F' in metric:
f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...], all_void_masks)
return j_metrics_res, f_metrics_res
@staticmethod
def _evaluate_unsupervised(all_gt_masks, all_res_masks, all_void_masks, metric, max_n_proposals=20):
if all_res_masks.shape[0] > max_n_proposals:
sys.stdout.write(f"\nIn your PNG files there is an index higher than the maximum number ({max_n_proposals}) of proposals allowed!")
sys.exit()
elif all_res_masks.shape[0] < all_gt_masks.shape[0]:
zero_padding = np.zeros((all_gt_masks.shape[0] - all_res_masks.shape[0], *all_res_masks.shape[1:]))
all_res_masks = np.concatenate([all_res_masks, zero_padding], axis=0)
j_metrics_res = np.zeros((all_res_masks.shape[0], all_gt_masks.shape[0], all_gt_masks.shape[1]))
f_metrics_res = np.zeros((all_res_masks.shape[0], all_gt_masks.shape[0], all_gt_masks.shape[1]))
for ii in range(all_gt_masks.shape[0]):
for jj in range(all_res_masks.shape[0]):
if 'J' in metric:
j_metrics_res[jj, ii, :] = db_eval_iou(all_gt_masks[ii, ...], all_res_masks[jj, ...], all_void_masks)
if 'F' in metric:
f_metrics_res[jj, ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[jj, ...], all_void_masks)
if 'J' in metric and 'F' in metric:
all_metrics = (np.mean(j_metrics_res, axis=2) + np.mean(f_metrics_res, axis=2)) / 2
else:
all_metrics = np.mean(j_metrics_res, axis=2) if 'J' in metric else np.mean(f_metrics_res, axis=2)
row_ind, col_ind = linear_sum_assignment(-all_metrics)
return j_metrics_res[row_ind, col_ind, :], f_metrics_res[row_ind, col_ind, :]
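    # Illustrative note (not part of the original code): for a score matrix
    # such as
    #     all_metrics = [[0.9, 0.1],
    #                    [0.2, 0.8]]
    # linear_sum_assignment(-all_metrics) returns row_ind = [0, 1] and
    # col_ind = [0, 1], i.e. each proposal is matched to the ground-truth
    # object that maximises the summed score in all_metrics.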
def evaluate(self, res_path, metric=('J', 'F'), debug=False):
metric = metric if isinstance(metric, tuple) or isinstance(metric, list) else [metric]
if 'T' in metric:
raise ValueError('Temporal metric not supported!')
if 'J' not in metric and 'F' not in metric:
raise ValueError('Metric possible values are J for IoU or F for Boundary')
# Containers
metrics_res = {}
if 'J' in metric:
metrics_res['J'] = {"M": [], "R": [], "D": [], "M_per_object": {}}
if 'F' in metric:
metrics_res['F'] = {"M": [], "R": [], "D": [], "M_per_object": {}}
# Sweep all sequences
results = Results(root_dir=res_path)
for seq in tqdm(list(self.dataset.get_sequences())):
all_gt_masks, all_void_masks, all_masks_id = self.dataset.get_all_masks(seq, True)
if self.task == 'semi-supervised':
all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
all_res_masks = results.read_masks(seq, all_masks_id)
if self.task == 'unsupervised':
j_metrics_res, f_metrics_res = self._evaluate_unsupervised(all_gt_masks, all_res_masks, all_void_masks, metric)
elif self.task == 'semi-supervised':
j_metrics_res, f_metrics_res = self._evaluate_semisupervised(all_gt_masks, all_res_masks, None, metric)
for ii in range(all_gt_masks.shape[0]):
seq_name = f'{seq}_{ii+1}'
if 'J' in metric:
[JM, JR, JD] = utils.db_statistics(j_metrics_res[ii])
metrics_res['J']["M"].append(JM)
metrics_res['J']["R"].append(JR)
metrics_res['J']["D"].append(JD)
metrics_res['J']["M_per_object"][seq_name] = JM
if 'F' in metric:
[FM, FR, FD] = utils.db_statistics(f_metrics_res[ii])
metrics_res['F']["M"].append(FM)
metrics_res['F']["R"].append(FR)
metrics_res['F']["D"].append(FD)
metrics_res['F']["M_per_object"][seq_name] = FM
# Show progress
if debug:
sys.stdout.write(seq + '\n')
sys.stdout.flush()
return metrics_res
| 6,143 | 54.351351 | 143 | py |
davis2017-evaluation | davis2017-evaluation-master/davis2017/davis.py | import os
from glob import glob
from collections import defaultdict
import numpy as np
from PIL import Image
class DAVIS(object):
SUBSET_OPTIONS = ['train', 'val', 'test-dev', 'test-challenge']
TASKS = ['semi-supervised', 'unsupervised']
DATASET_WEB = 'https://davischallenge.org/davis2017/code.html'
VOID_LABEL = 255
def __init__(self, root, task='unsupervised', subset='val', sequences='all', resolution='480p', codalab=False):
"""
Class to read the DAVIS dataset
:param root: Path to the DAVIS folder that contains JPEGImages, Annotations, etc. folders.
:param task: Task to load the annotations, choose between semi-supervised or unsupervised.
:param subset: Set to load the annotations
:param sequences: Sequences to consider, 'all' to use all the sequences in a set.
        :param resolution: Specify the resolution to use for the dataset, choose between '480p' and 'Full-Resolution'
"""
if subset not in self.SUBSET_OPTIONS:
raise ValueError(f'Subset should be in {self.SUBSET_OPTIONS}')
if task not in self.TASKS:
raise ValueError(f'The only tasks that are supported are {self.TASKS}')
self.task = task
self.subset = subset
self.root = root
self.img_path = os.path.join(self.root, 'JPEGImages', resolution)
annotations_folder = 'Annotations' if task == 'semi-supervised' else 'Annotations_unsupervised'
self.mask_path = os.path.join(self.root, annotations_folder, resolution)
year = '2019' if task == 'unsupervised' and (subset == 'test-dev' or subset == 'test-challenge') else '2017'
self.imagesets_path = os.path.join(self.root, 'ImageSets', year)
self._check_directories()
if sequences == 'all':
with open(os.path.join(self.imagesets_path, f'{self.subset}.txt'), 'r') as f:
tmp = f.readlines()
sequences_names = [x.strip() for x in tmp]
else:
sequences_names = sequences if isinstance(sequences, list) else [sequences]
self.sequences = defaultdict(dict)
for seq in sequences_names:
images = np.sort(glob(os.path.join(self.img_path, seq, '*.jpg'))).tolist()
if len(images) == 0 and not codalab:
raise FileNotFoundError(f'Images for sequence {seq} not found.')
self.sequences[seq]['images'] = images
masks = np.sort(glob(os.path.join(self.mask_path, seq, '*.png'))).tolist()
masks.extend([-1] * (len(images) - len(masks)))
self.sequences[seq]['masks'] = masks
def _check_directories(self):
if not os.path.exists(self.root):
raise FileNotFoundError(f'DAVIS not found in the specified directory, download it from {self.DATASET_WEB}')
if not os.path.exists(os.path.join(self.imagesets_path, f'{self.subset}.txt')):
raise FileNotFoundError(f'Subset sequences list for {self.subset} not found, download the missing subset '
f'for the {self.task} task from {self.DATASET_WEB}')
if self.subset in ['train', 'val'] and not os.path.exists(self.mask_path):
raise FileNotFoundError(f'Annotations folder for the {self.task} task not found, download it from {self.DATASET_WEB}')
def get_frames(self, sequence):
for img, msk in zip(self.sequences[sequence]['images'], self.sequences[sequence]['masks']):
image = np.array(Image.open(img))
            mask = None if msk in (None, -1) else np.array(Image.open(msk))
yield image, mask
def _get_all_elements(self, sequence, obj_type):
obj = np.array(Image.open(self.sequences[sequence][obj_type][0]))
all_objs = np.zeros((len(self.sequences[sequence][obj_type]), *obj.shape))
obj_id = []
for i, obj in enumerate(self.sequences[sequence][obj_type]):
all_objs[i, ...] = np.array(Image.open(obj))
obj_id.append(''.join(obj.split('/')[-1].split('.')[:-1]))
return all_objs, obj_id
def get_all_images(self, sequence):
return self._get_all_elements(sequence, 'images')
def get_all_masks(self, sequence, separate_objects_masks=False):
masks, masks_id = self._get_all_elements(sequence, 'masks')
masks_void = np.zeros_like(masks)
# Separate void and object masks
for i in range(masks.shape[0]):
masks_void[i, ...] = masks[i, ...] == 255
masks[i, masks[i, ...] == 255] = 0
if separate_objects_masks:
num_objects = int(np.max(masks[0, ...]))
tmp = np.ones((num_objects, *masks.shape))
tmp = tmp * np.arange(1, num_objects + 1)[:, None, None, None]
masks = (tmp == masks[None, ...])
masks = masks > 0
return masks, masks_void, masks_id
def get_sequences(self):
for seq in self.sequences:
yield seq
if __name__ == '__main__':
from matplotlib import pyplot as plt
only_first_frame = True
subsets = ['train', 'val']
for s in subsets:
dataset = DAVIS(root='/home/csergi/scratch2/Databases/DAVIS2017_private', subset=s)
for seq in dataset.get_sequences():
g = dataset.get_frames(seq)
img, mask = next(g)
plt.subplot(2, 1, 1)
plt.title(seq)
plt.imshow(img)
plt.subplot(2, 1, 2)
plt.imshow(mask)
plt.show(block=True)
| 5,514 | 43.837398 | 130 | py |
mapalignment | mapalignment-master/projects/mapalign/evaluate_funcs/evaluate_utils.py | import sys
import numpy as np
sys.path.append("../../utils")
import polygon_utils
def compute_batch_polygon_distances(gt_polygons_batch, aligned_disp_polygons_batch):
# Compute distances
distances = np.sqrt(np.sum(np.square(aligned_disp_polygons_batch - gt_polygons_batch), axis=-1))
min = np.nanmin(distances)
mean = np.nanmean(distances)
max = np.nanmax(distances)
return min, mean, max
def compute_threshold_accuracies(gt_vertices_batch, pred_vertices_batch, thresholds):
stripped_gt_polygons_list = []
stripped_pred_polygons_list = []
for gt_vertices, pred_vertices in zip(gt_vertices_batch, pred_vertices_batch):
for gt_polygon, pred_polygon in zip(gt_vertices, pred_vertices):
# Find first nan occurance
nan_indices = np.where(np.isnan(gt_polygon[:, 0]))[0]
if len(nan_indices):
nan_index = nan_indices[0]
if nan_index:
gt_polygon = gt_polygon[:nan_index, :]
pred_polygon = pred_polygon[:nan_index, :]
else:
# Empty polygon, break the for loop
break
gt_polygon = polygon_utils.strip_redundant_vertex(gt_polygon, epsilon=1e-3)
pred_polygon = polygon_utils.strip_redundant_vertex(pred_polygon, epsilon=1e-3)
stripped_gt_polygons_list.append(gt_polygon)
stripped_pred_polygons_list.append(pred_polygon)
if len(stripped_gt_polygons_list) == 0 or len(stripped_pred_polygons_list) == 0:
return []
stripped_gt_polygons = np.concatenate(stripped_gt_polygons_list)
stripped_pred_polygons = np.concatenate(stripped_pred_polygons_list)
distances = np.sqrt(np.sum(np.square(stripped_gt_polygons - stripped_pred_polygons), axis=-1))
# Compute thresholds count
threshold_accuracies = []
for threshold in thresholds:
accuracy = np.sum(distances <= threshold) / distances.size
threshold_accuracies.append(accuracy)
return threshold_accuracies
if __name__ == '__main__':
batch_size = 1
poly_count = 3
vertex_count = 4
gt_vertices = np.zeros((batch_size, poly_count, vertex_count, 2))
gt_vertices[0, 0, 0, :] = [1, 2]
gt_vertices[0, 0, 1, :] = [3, 4]
gt_vertices[0, 0, 2, :] = np.nan
gt_vertices[0, 1, 0, :] = np.nan
pred_vertices = np.zeros((batch_size, poly_count, vertex_count, 2))
pred_vertices[0, 0, 0, :] = [1, 2]
pred_vertices[0, 0, 1, :] = [3, 4]
pred_vertices[0, 0, 2, :] = np.nan
pred_vertices[0, 1, 0, :] = np.nan
thresholds = [1, 2, 3, 4, 5, 6, 7, 8]
threshold_accuracies = compute_threshold_accuracies(gt_vertices, pred_vertices, thresholds)
print("threshold_accuracies = {}".format(threshold_accuracies))
| 2,799 | 36.333333 | 100 | py |
mapalignment | mapalignment-master/projects/mapalign/dataset_utils/preprocess_bradbury_buildings_multires.py | import sys
import os
import json
import math
import skimage.transform
import skimage.draw
import numpy as np
# from PIL import Image, ImageDraw
# Image.MAX_IMAGE_PIXELS = 200000000
import tensorflow as tf
import config_bradbury_buildings_multires as config
sys.path.append("../../../data/bradbury_buildings_roads_height_dataset")
import read
# sys.path.append("../utils")
# import visualization
sys.path.append("../../utils")
import tf_utils
import polygon_utils
import image_utils
# import python_utils
import math_utils
import dataset_utils
# if python_utils.module_exists("matplotlib.pyplot"):
# import matplotlib.pyplot as plt
def downsample_gt_data(image, metadata, gt_polygons, normed_disp_field_maps, downsampling_factor):
# First, correct the downsampling_factor so that:
# A downsampling_factor of 1 results in a final pixel_size equal to config.REFERENCE_PIXEL_SIZE
# A downsampling_factor of 2 results in a final pixel_size equal to 2 * config.REFERENCE_PIXEL_SIZE
corrected_downsampling_factor = downsampling_factor * config.REFERENCE_PIXEL_SIZE / metadata["pixelsize"]
scale = 1 / corrected_downsampling_factor
downsampled_image = skimage.transform.rescale(image, scale, order=3, preserve_range=True, multichannel=True)
downsampled_image = downsampled_image.astype(image.dtype)
downsampled_gt_polygons = polygon_utils.rescale_polygon(gt_polygons, scale)
downsampled_normed_disp_field_maps = np.empty((normed_disp_field_maps.shape[0],
round(
normed_disp_field_maps.shape[1] / corrected_downsampling_factor),
round(
normed_disp_field_maps.shape[2] / corrected_downsampling_factor),
normed_disp_field_maps.shape[3]))
for i in range(normed_disp_field_maps.shape[0]):
downsampled_normed_disp_field_maps[i] = skimage.transform.rescale(normed_disp_field_maps[i],
scale, order=3,
preserve_range=True, multichannel=True)
return downsampled_image, downsampled_gt_polygons, downsampled_normed_disp_field_maps
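# Worked example (illustrative): with config.REFERENCE_PIXEL_SIZE = 0.3 m and an
# input image whose metadata reports a pixel size of 0.15 m (hypothetical value),
# a downsampling_factor of 1 gives corrected_downsampling_factor = 1 * 0.3 / 0.15 = 2,
# i.e. the image is rescaled by 0.5 and ends up at the reference 0.3 m/pixel;
# a downsampling_factor of 2 would end up at 0.6 m/pixel, and so on.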
def generate_disp_data(normed_disp_field_maps, gt_polygons, disp_max_abs_value, spatial_shape):
scaled_disp_field_maps = normed_disp_field_maps * disp_max_abs_value
disp_polygons_list = polygon_utils.apply_displacement_fields_to_polygons(gt_polygons,
scaled_disp_field_maps)
disp_polygon_maps = polygon_utils.draw_polygon_maps(disp_polygons_list, spatial_shape, fill=True,
edges=True, vertices=True)
return disp_polygons_list, disp_polygon_maps
def process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map, disp_field_maps, disp_polygon_maps,
gt_polygons=None, disp_polygons_list=None):
"""
Crops all inputs to patches generated with patch_stride and patch_res
:param patch_stride:
:param patch_res:
:param image:
:param gt_polygon_map:
:param disp_field_maps:
:param disp_polygon_maps:
:param gt_polygons:
:param disp_polygons_list:
:return:
"""
include_polygons = gt_polygons is not None and disp_polygons_list is not None
patches = []
patch_boundingboxes = image_utils.compute_patch_boundingboxes(image.shape[0:2],
stride=patch_stride,
patch_res=patch_res)
# print(patch_boundingboxes)
for patch_boundingbox in patch_boundingboxes:
# Crop image
patch_image = image[patch_boundingbox[0]:patch_boundingbox[2], patch_boundingbox[1]:patch_boundingbox[3], :]
if include_polygons:
patch_gt_polygons, \
patch_disp_polygons_array = polygon_utils.prepare_polygons_for_tfrecord(gt_polygons, disp_polygons_list,
patch_boundingbox)
else:
patch_gt_polygons = patch_disp_polygons_array = None
patch_gt_polygon_map = gt_polygon_map[patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
patch_disp_field_maps = disp_field_maps[:,
patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
patch_disp_polygon_maps_array = disp_polygon_maps[:,
patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
        # Filter out patches based on the presence of polygon vertices inside the inner patch
patch_inner_res = 2 * patch_stride
patch_padding = (patch_res - patch_inner_res) // 2
inner_patch_gt_polygon_map_corners = patch_gt_polygon_map[patch_padding:-patch_padding,
patch_padding:-patch_padding, 2]
if np.sum(inner_patch_gt_polygon_map_corners) \
and (not include_polygons or (include_polygons and patch_gt_polygons is not None)):
assert patch_image.shape[0] == patch_image.shape[
1], "image should be square otherwise tile_res cannot be defined"
tile_res = patch_image.shape[0]
disp_map_count = patch_disp_polygon_maps_array.shape[0]
patches.append({
"tile_res": tile_res,
"disp_map_count": disp_map_count,
"image": patch_image,
"gt_polygons": patch_gt_polygons,
"disp_polygons": patch_disp_polygons_array,
"gt_polygon_map": patch_gt_polygon_map,
"disp_field_maps": patch_disp_field_maps,
"disp_polygon_maps": patch_disp_polygon_maps_array,
})
return patches
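# Worked example (illustrative): with patch_res = 220 and patch_stride = 100
# (the TILE_RES / TILE_STRIDE values from the config), the inner patch is
# patch_inner_res = 2 * 100 = 200 pixels wide, patch_padding = (220 - 200) // 2 = 10,
# and a patch is kept only if at least one ground-truth vertex falls inside its
# central 200x200 window.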
def save_patch_to_tfrecord(patch, shard_writer):
# print(patch["disp_field_maps"].min() / 2147483647, patch["disp_field_maps"].max() / 2147483647)
# visualization.plot_field_map("disp_field_map", patch["disp_field_maps"][0])
# Compress image into jpg
image_raw = image_utils.convert_array_to_jpg_bytes(patch["image"], mode="RGB")
gt_polygon_map_raw = patch["gt_polygon_map"].tostring() # TODO: convert to png
disp_field_maps_raw = patch["disp_field_maps"].tostring()
disp_polygon_maps_raw = patch[
"disp_polygon_maps"].tostring() # TODO: convert to png (have to handle several png images...)
if patch["gt_polygons"] is not None and patch["disp_polygons"] is not None:
gt_polygons_raw = patch["gt_polygons"].tostring()
disp_polygons_raw = patch["disp_polygons"].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'tile_res': tf_utils.int64_feature(patch["tile_res"]),
'disp_map_count': tf_utils.int64_feature(patch["disp_map_count"]),
'image': tf_utils.bytes_feature(image_raw),
'gt_polygon_count': tf_utils.int64_feature(patch["gt_polygons"].shape[0]),
'gt_polygon_length': tf_utils.int64_feature(patch["gt_polygons"].shape[1]),
'gt_polygons': tf_utils.bytes_feature(gt_polygons_raw),
'disp_polygons': tf_utils.bytes_feature(disp_polygons_raw),
'gt_polygon_map': tf_utils.bytes_feature(gt_polygon_map_raw),
'disp_field_maps': tf_utils.bytes_feature(disp_field_maps_raw),
'disp_polygon_maps': tf_utils.bytes_feature(disp_polygon_maps_raw)
}))
else:
example = tf.train.Example(features=tf.train.Features(feature={
'tile_res': tf_utils.int64_feature(patch["tile_res"]),
'disp_map_count': tf_utils.int64_feature(patch["disp_map_count"]),
'image': tf_utils.bytes_feature(image_raw),
'gt_polygon_map': tf_utils.bytes_feature(gt_polygon_map_raw),
'disp_field_maps': tf_utils.bytes_feature(disp_field_maps_raw),
'disp_polygon_maps': tf_utils.bytes_feature(disp_polygon_maps_raw),
}))
shard_writer.write(example.SerializeToString())
def process_image(dataset_raw_dirpath, image_info, overwrite_polygons_filename_extension, patch_stride, patch_res, downsampling_factors, disp_max_abs_value,
include_polygons,
downsampling_factor_writers):
"""
Writes to all the writers (one for each resolution) all sample patches extracted from the image_info.
    :param dataset_raw_dirpath:
    :param image_info:
    :param overwrite_polygons_filename_extension:
:param patch_stride:
:param patch_res:
:param downsampling_factors:
:param disp_max_abs_value:
:param include_polygons:
:param downsampling_factor_writers:
:return:
"""
ori_image, ori_metadata, ori_gt_polygons = read.load_gt_data(dataset_raw_dirpath, image_info["city"],
image_info["number"], overwrite_polygons_filename_extension=overwrite_polygons_filename_extension)
if ori_gt_polygons is None:
return False
ori_gt_polygons = polygon_utils.polygons_remove_holes(ori_gt_polygons) # TODO: Remove
# Remove redundant vertices
ori_gt_polygons = polygon_utils.simplify_polygons(ori_gt_polygons, tolerance=1)
# visualization.init_figures(["gt_data"], figsize=(60, 40))
# visualization.plot_example_polygons("gt_data", ori_image, ori_gt_polygons)
# Create displacement maps
ori_normed_disp_field_maps = math_utils.create_displacement_field_maps(ori_image.shape[:2], config.DISP_MAP_COUNT,
config.DISP_MODES,
config.DISP_GAUSS_MU_RANGE,
config.DISP_GAUSS_SIG_SCALING) # TODO: uncomment
# ori_normed_disp_field_maps = np.zeros((config.DISP_MAP_COUNT, ori_image.shape[0], ori_image.shape[1], 2)) # TODO: remove
# # TODO: remove
# np.random.seed(seed=0)
# colors = np.random.randint(0, 255, size=(len(downsampling_factors), 3), dtype=np.uint8)
for index, downsampling_factor in enumerate(downsampling_factors):
print("downsampling_factor: {}".format(downsampling_factor))
# Downsample ground-truth
image, gt_polygons, normed_disp_field_maps = downsample_gt_data(ori_image, ori_metadata, ori_gt_polygons,
ori_normed_disp_field_maps, downsampling_factor)
spatial_shape = image.shape[:2]
# Random color
# image = np.tile(colors[index], reps=[image.shape[0], image.shape[1], 1]) # TODO: remove
# Draw gt polygon map
gt_polygon_map = polygon_utils.draw_polygon_map(gt_polygons, spatial_shape, fill=True, edges=True,
vertices=True)
# Generate final displacement
disp_polygons_list, disp_polygon_maps = generate_disp_data(normed_disp_field_maps, gt_polygons,
disp_max_abs_value, spatial_shape)
# Compress data
gt_polygons = [polygon.astype(np.float16) for polygon in gt_polygons]
disp_polygons_list = [[polygon.astype(np.float16) for polygon in polygons] for polygons in disp_polygons_list]
disp_field_maps = normed_disp_field_maps * 32767 # int16 max value = 32767
disp_field_maps = np.round(disp_field_maps)
disp_field_maps = disp_field_maps.astype(np.int16)
# Cut sample into patches
if include_polygons:
patches = process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map,
disp_field_maps, disp_polygon_maps,
gt_polygons, disp_polygons_list)
else:
patches = process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map,
disp_field_maps, disp_polygon_maps)
for patch in patches:
save_patch_to_tfrecord(patch, downsampling_factor_writers[downsampling_factor])
return True
def process_dataset(dataset_fold, dataset_raw_dirpath,
patch_stride, patch_res, image_info_list, overwrite_polygons_filename_extension,
data_aug_rot,
downsampling_factors,
disp_max_abs_value):
print("Processing images from {}".format(dataset_raw_dirpath))
for image_index, image_info in enumerate(image_info_list):
image_name = read.IMAGE_NAME_FORMAT.format(city=image_info["city"], number=image_info["number"])
print("Processing city {}, number {}. Progression: {}/{}"
.format(image_info["city"], image_info["number"], image_index + 1, len(image_info_list)))
include_polygons = (dataset_fold == "val" or dataset_fold == "test")
if data_aug_rot and dataset_fold == "train":
# Account for data augmentation when rotating patches on the training set
adjusted_patch_res = math.ceil(patch_res * math.sqrt(2))
adjusted_patch_stride = math.floor(
patch_stride * math.sqrt(
2) / 2) # Divided by 2 so that no pixels are left out when rotating by 45 degrees
else:
adjusted_patch_res = patch_res
adjusted_patch_stride = patch_stride
# Filter out downsampling_factors that are lower than city_min_downsampling_factor
image_downsampling_factors = [downsampling_factor for downsampling_factor in downsampling_factors if
image_info["min_downsampling_factor"] <= downsampling_factor]
# Create writers
writers = {}
for downsampling_factor in downsampling_factors:
filename_format = os.path.join(config.TFRECORDS_DIR,
config.TFRECORD_FILEPATH_FORMAT.format(dataset_fold, image_name,
downsampling_factor))
shard_writer = dataset_utils.TFRecordShardWriter(filename_format, config.RECORDS_PER_SHARD)
writers[downsampling_factor] = shard_writer
process_image(dataset_raw_dirpath, image_info, overwrite_polygons_filename_extension,
adjusted_patch_stride, adjusted_patch_res,
image_downsampling_factors,
disp_max_abs_value,
include_polygons,
writers)
# Close writers
for downsampling_factor in downsampling_factors:
writers[downsampling_factor].close()
def save_metadata(meta_data_filepath, disp_max_abs_value, downsampling_factors):
data = {
"disp_max_abs_value": disp_max_abs_value,
"downsampling_factors": downsampling_factors,
}
with open(meta_data_filepath, 'w') as outfile:
json.dump(data, outfile)
def main():
# input("Prepare dataset, overwrites previous data. This can take a while (1h), press <Enter> to continue...")
# Create dataset tfrecords directory of it does not exist
if not os.path.exists(config.TFRECORDS_DIR):
os.makedirs(config.TFRECORDS_DIR)
# Save meta-data
meta_data_filepath = os.path.join(config.TFRECORDS_DIR, "metadata.txt")
save_metadata(meta_data_filepath, config.DISP_MAX_ABS_VALUE,
config.DOWNSAMPLING_FACTORS)
process_dataset("train",
config.DATASET_RAW_DIRPATH,
config.TILE_STRIDE,
config.TILE_RES,
config.TRAIN_IMAGES,
config.DATASET_OVERWRITE_POLYGONS_FILENAME_EXTENSION,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
process_dataset("val",
config.DATASET_RAW_DIRPATH,
config.TILE_STRIDE,
config.TILE_RES,
config.VAL_IMAGES,
config.DATASET_OVERWRITE_POLYGONS_FILENAME_EXTENSION,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
process_dataset("test",
config.DATASET_RAW_DIRPATH,
config.TILE_STRIDE,
config.TILE_RES,
config.TEST_IMAGES,
config.DATASET_OVERWRITE_POLYGONS_FILENAME_EXTENSION,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
if __name__ == "__main__":
main()
| 17,281 | 47.68169 | 163 | py |
mapalignment | mapalignment-master/projects/mapalign/dataset_utils/config_bradbury_buildings_multires.py | import os
def choose_first_existing_dir(dir_list):
for dir in dir_list:
if os.path.exists(dir):
return dir
return None
ROOT_DIR = choose_first_existing_dir([
"/local/shared/epitome-polygon-deep-learning", # Inria cluster node nefgpu23
"/home/nigirard/epitome-polygon-deep-learning", # Landsat
"/workspace", # Docker (mainly when using Deepsat or personal computer)
])
print("ROOT_DIR: {}".format(ROOT_DIR))
# Dataset offline pre-processing
DATASET_DIRPATH = os.path.join(ROOT_DIR, "data/bradbury_buildings_roads_height_dataset")
DATASET_RAW_DIRPATH = os.path.join(DATASET_DIRPATH, "raw")
DATASET_OVERWRITE_POLYGONS_FILENAME_EXTENSION = None # Can be "_aligned_noisy_building_polygons_1.npy"
TILE_RES = 220 # The maximum patch size will be 220. Adjusted for rotation will be ceil(220*sqrt(2)) = 312.
TILE_STRIDE = 100  # The maximum inner patch res will be 100
# If True, generates patches with increased size to account for cropping during the online processing step
DATA_AUG_ROT = True # data_aug_rot only applies to train
TFRECORDS_DIR = os.path.join(DATASET_DIRPATH, "tfrecords.mapalign.multires.aligned_noisy_1")
TFRECORD_FILEPATH_FORMAT = "{}/{}/ds_fac_{:02d}.{{:06d}}.tfrecord" # Fold, image name, ds_fac, shard number
DISP_MAP_COUNT = 1 # Number of displacement applied to polygons to generate to the displaced gt map (1 minimum, more for dataset augmentation)
DISP_MODES = 30 # Number of Gaussians mixed up to make the displacement map (Default: 20)
DISP_GAUSS_MU_RANGE = [0, 1] # Coordinates are normalized to [0, 1] before the function is applied
DISP_GAUSS_SIG_SCALING = [0.0, 0.002] # Coordinates are normalized to [0, 1] before the function is applied
DISP_MAX_ABS_VALUE = 4
# DISP_MAP_MARGIN = 100 # Some polygons are slightly outside the image, this margin allows to take them into account
# Tile generation
REFERENCE_PIXEL_SIZE = 0.3 # In meters.
# All images in AerialImage Dataset have a pixel size of 0.3m and
# a lot of this dataset's images have the same pixel size so that is why we choose this pixel size as a reference.
DOWNSAMPLING_FACTORS = [1, 2, 4, 8]
# The resulting pixel sizes will be equal to [REFERENCE_PIXEL_SIZE/DOWNSAMPLING_FACTOR for DOWNSAMPLING_FACTOR in DOWNSAMPLING_FACTORS]
# Split data into TRAIN, VAL and TEST
TRAIN_IMAGES = [
{
"city": "Arlington",
"number": 1,
"min_downsampling_factor": 1,
},
{
"city": "Arlington",
"number": 2,
"min_downsampling_factor": 1,
},
{
"city": "Arlington",
"number": 3,
"min_downsampling_factor": 1,
},
{
"city": "Atlanta",
"number": 1,
"min_downsampling_factor": 1,
},
{
"city": "Atlanta",
"number": 2,
"min_downsampling_factor": 1,
},
{
"city": "Atlanta",
"number": 3,
"min_downsampling_factor": 1,
},
{
"city": "Austin",
"number": 1,
"min_downsampling_factor": 1,
},
{
"city": "Austin",
"number": 2,
"min_downsampling_factor": 1,
},
{
"city": "Austin",
"number": 3,
"min_downsampling_factor": 1,
},
{
"city": "DC",
"number": 1,
"min_downsampling_factor": 1,
},
{
"city": "DC",
"number": 2,
"min_downsampling_factor": 1,
},
{
"city": "NewHaven",
"number": 1,
"min_downsampling_factor": 1,
},
{
"city": "NewHaven",
"number": 2,
"min_downsampling_factor": 1,
},
{
"city": "NewYork",
"number": 1,
"min_downsampling_factor": 1,
},
{
"city": "NewYork",
"number": 2,
"min_downsampling_factor": 1,
},
{
"city": "NewYork",
"number": 3,
"min_downsampling_factor": 1,
},
{
"city": "Norfolk",
"number": 1,
"min_downsampling_factor": 1,
},
{
"city": "Norfolk",
"number": 2,
"min_downsampling_factor": 1,
},
{
"city": "Norfolk",
"number": 3,
"min_downsampling_factor": 1,
},
{
"city": "SanFrancisco",
"number": 1,
"min_downsampling_factor": 1,
},
{
"city": "SanFrancisco",
"number": 2,
"min_downsampling_factor": 1,
},
{
"city": "SanFrancisco",
"number": 3,
"min_downsampling_factor": 1,
},
{
"city": "Seekonk",
"number": 1,
"min_downsampling_factor": 1,
},
{
"city": "Seekonk",
"number": 2,
"min_downsampling_factor": 1,
},
{
"city": "Seekonk",
"number": 3,
"min_downsampling_factor": 1,
},
]
VAL_IMAGES = [
]
TEST_IMAGES = [
]
# Split large tfrecord into several smaller tfrecords (shards)
RECORDS_PER_SHARD = 100
### --- Save --- #
# TRAIN_IMAGES = [
# {
# "city": "Arlington",
# "number": 3,
# "min_downsampling_factor": 1,
# },
# {
# "city": "Atlanta",
# "number": 1,
# "min_downsampling_factor": 1,
# },
# {
# "city": "Atlanta",
# "number": 2,
# "min_downsampling_factor": 1,
# },
# {
# "city": "Atlanta",
# "number": 3,
# "min_downsampling_factor": 1,
# },
# {
# "city": "Austin",
# "number": 1,
# "min_downsampling_factor": 1,
# },
# {
# "city": "Austin",
# "number": 2,
# "min_downsampling_factor": 1,
# },
# {
# "city": "Austin",
# "number": 3,
# "min_downsampling_factor": 1,
# },
# {
# "city": "NewYork",
# "number": 2,
# "min_downsampling_factor": 1,
# },
# {
# "city": "SanFrancisco",
# "number": 1,
# "min_downsampling_factor": 1,
# },
# {
# "city": "SanFrancisco",
# "number": 2,
# "min_downsampling_factor": 1,
# },
# {
# "city": "SanFrancisco",
# "number": 3,
# "min_downsampling_factor": 1,
# },
# {
# "city": "Norfolk",
# "number": 1,
# "min_downsampling_factor": 1,
# },
# {
# "city": "Norfolk",
# "number": 2,
# "min_downsampling_factor": 1,
# },
# {
# "city": "Norfolk",
# "number": 3,
# "min_downsampling_factor": 1,
# },
# ]
| 6,538 | 23.582707 | 143 | py |
mapalignment | mapalignment-master/projects/mapalign/dataset_utils/preprocess_aerial_image_multires.py | import sys
import os
import math
import json
import random
import skimage.transform
import numpy as np
import tensorflow as tf
import config_aerial_image_multires as config
sys.path.append("../../../data/AerialImageDataset")
import read
# sys.path.append("../utils")
# import visualization
sys.path.append("../../utils")
import tf_utils
import polygon_utils
import image_utils
import python_utils
import math_utils
import dataset_utils
import print_utils
if python_utils.module_exists("matplotlib.pyplot"):
import matplotlib.pyplot as plt
def downsample_gt_data(image, gt_polygons, normed_disp_field_maps, downsampling_factor):
downsampled_image = skimage.transform.rescale(image, 1 / downsampling_factor, order=3, preserve_range=True, multichannel=True)
downsampled_image = downsampled_image.astype(image.dtype)
downsampled_gt_polygons = polygon_utils.rescale_polygon(gt_polygons, 1 / downsampling_factor)
downsampled_normed_disp_field_maps = np.empty((normed_disp_field_maps.shape[0],
round(normed_disp_field_maps.shape[1] / downsampling_factor),
round(normed_disp_field_maps.shape[2] / downsampling_factor),
normed_disp_field_maps.shape[3]))
for i in range(normed_disp_field_maps.shape[0]):
downsampled_normed_disp_field_maps[i] = skimage.transform.rescale(normed_disp_field_maps[i], 1 / downsampling_factor, order=3, preserve_range=True, multichannel=True)
return downsampled_image, downsampled_gt_polygons, downsampled_normed_disp_field_maps
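# Usage sketch (assumption: `image` is a 5000x5000 RGB tile as loaded by read.load_gt_data and
# `normed_disp_field_maps` has shape (DISP_MAP_COUNT, 5000, 5000, 2)):
# image_8, polygons_8, disp_maps_8 = downsample_gt_data(image, gt_polygons, normed_disp_field_maps, 8)
# image_8.shape -> (625, 625, 3); polygon coordinates and displacement maps are rescaled by 1/8 as well.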
def generate_disp_data(normed_disp_field_maps, gt_polygons, disp_max_abs_value, spatial_shape):
scaled_disp_field_maps = normed_disp_field_maps * disp_max_abs_value
disp_polygons_list = polygon_utils.apply_displacement_fields_to_polygons(gt_polygons,
scaled_disp_field_maps)
disp_polygon_maps = polygon_utils.draw_polygon_maps(disp_polygons_list, spatial_shape, fill=True,
edges=True, vertices=True)
return disp_polygons_list, disp_polygon_maps
def process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map, disp_field_maps, disp_polygon_maps,
gt_polygons=None, disp_polygons_list=None):
"""
Crops all inputs to patches generated with patch_stride and patch_res
:param patch_stride:
:param patch_res:
:param image:
:param gt_polygon_map:
:param disp_field_maps:
:param disp_polygon_maps:
:param gt_polygons:
:param disp_polygons_list:
:return:
"""
include_polygons = gt_polygons is not None and disp_polygons_list is not None
patches = []
patch_boundingboxes = image_utils.compute_patch_boundingboxes(image.shape[0:2],
stride=patch_stride,
patch_res=patch_res)
# print(patch_boundingboxes)
for patch_boundingbox in patch_boundingboxes:
# Crop image
patch_image = image[patch_boundingbox[0]:patch_boundingbox[2], patch_boundingbox[1]:patch_boundingbox[3], :]
if include_polygons:
patch_gt_polygons, \
patch_disp_polygons_array = polygon_utils.prepare_polygons_for_tfrecord(gt_polygons, disp_polygons_list,
patch_boundingbox)
else:
patch_gt_polygons = patch_disp_polygons_array = None
patch_gt_polygon_map = gt_polygon_map[patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
patch_disp_field_maps = disp_field_maps[:,
patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
patch_disp_polygon_maps_array = disp_polygon_maps[:,
patch_boundingbox[0]:patch_boundingbox[2],
patch_boundingbox[1]:patch_boundingbox[3], :]
        # Filter out patches based on the presence of polygon corners inside the inner patch
patch_inner_res = 2 * patch_stride
patch_padding = (patch_res - patch_inner_res) // 2
inner_patch_gt_polygon_map_corners = patch_gt_polygon_map[patch_padding:-patch_padding, patch_padding:-patch_padding, 2]
if np.sum(inner_patch_gt_polygon_map_corners) \
and (not include_polygons or (include_polygons and patch_gt_polygons is not None)):
assert patch_image.shape[0] == patch_image.shape[1], "image should be square otherwise tile_res cannot be defined"
tile_res = patch_image.shape[0]
disp_map_count = patch_disp_polygon_maps_array.shape[0]
patches.append({
"tile_res": tile_res,
"disp_map_count": disp_map_count,
"image": patch_image,
"gt_polygons": patch_gt_polygons,
"disp_polygons": patch_disp_polygons_array,
"gt_polygon_map": patch_gt_polygon_map,
"disp_field_maps": patch_disp_field_maps,
"disp_polygon_maps": patch_disp_polygon_maps_array,
})
return patches
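# Note (illustrative numbers only): without the rotation adjustment, patch_stride=100 and patch_res=220
# give patch_inner_res = 2 * 100 = 200 and a padding of (220 - 200) // 2 = 10 pixels around the inner patch.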
def save_patch_to_tfrecord(patch, shard_writer):
# print(patch["disp_field_maps"].min() / 2147483647, patch["disp_field_maps"].max() / 2147483647)
# visualization.plot_field_map("disp_field_map", patch["disp_field_maps"][0])
# Compress image into jpg
image_raw = image_utils.convert_array_to_jpg_bytes(patch["image"], mode="RGB")
gt_polygon_map_raw = patch["gt_polygon_map"].tostring() # TODO: convert to png
disp_field_maps_raw = patch["disp_field_maps"].tostring()
disp_polygon_maps_raw = patch["disp_polygon_maps"].tostring() # TODO: convert to png (have to handle several png images...)
if patch["gt_polygons"] is not None and patch["disp_polygons"] is not None:
gt_polygons_raw = patch["gt_polygons"].tostring()
disp_polygons_raw = patch["disp_polygons"].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'tile_res': tf_utils.int64_feature(patch["tile_res"]),
'disp_map_count': tf_utils.int64_feature(patch["disp_map_count"]),
'image': tf_utils.bytes_feature(image_raw),
'gt_polygon_count': tf_utils.int64_feature(patch["gt_polygons"].shape[0]),
'gt_polygon_length': tf_utils.int64_feature(patch["gt_polygons"].shape[1]),
'gt_polygons': tf_utils.bytes_feature(gt_polygons_raw),
'disp_polygons': tf_utils.bytes_feature(disp_polygons_raw),
'gt_polygon_map': tf_utils.bytes_feature(gt_polygon_map_raw),
'disp_field_maps': tf_utils.bytes_feature(disp_field_maps_raw),
'disp_polygon_maps': tf_utils.bytes_feature(disp_polygon_maps_raw)
}))
else:
example = tf.train.Example(features=tf.train.Features(feature={
'tile_res': tf_utils.int64_feature(patch["tile_res"]),
'disp_map_count': tf_utils.int64_feature(patch["disp_map_count"]),
'image': tf_utils.bytes_feature(image_raw),
'gt_polygon_map': tf_utils.bytes_feature(gt_polygon_map_raw),
'disp_field_maps': tf_utils.bytes_feature(disp_field_maps_raw),
'disp_polygon_maps': tf_utils.bytes_feature(disp_polygon_maps_raw),
}))
shard_writer.write(example.SerializeToString())
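# Note: the feature keys written above must stay in sync with the ones parsed by
# read_and_decode() in dataset_multires.py (same names and dtypes on both sides).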
def process_image(dataset_raw_dirpath, image_info, overwrite_polygon_dir_name, patch_stride, patch_res, downsampling_factors, disp_max_abs_value, include_polygons,
downsampling_factor_writers):
"""
    Writes to all the writers (one for each resolution) all sample patches extracted from the image described by image_info.
:param dataset_raw_dirpath:
:param image_info:
:param overwrite_polygon_dir_name:
:param patch_stride:
:param patch_res:
:param downsampling_factors:
:param disp_max_abs_value:
:param include_polygons:
:param downsampling_factor_writers:
:return:
"""
ori_image, ori_metadata, ori_gt_polygons = read.load_gt_data(dataset_raw_dirpath, image_info["city"], image_info["number"], overwrite_polygon_dir_name=overwrite_polygon_dir_name)
if ori_gt_polygons is None:
return False
# visualization.init_figures(["gt_data"], figsize=(60, 40))
# visualization.plot_example_polygons("gt_data", ori_image, ori_gt_polygons)
# Create displacement maps
ori_normed_disp_field_maps = math_utils.create_displacement_field_maps(ori_image.shape[:2], config.DISP_MAP_COUNT,
config.DISP_MODES, config.DISP_GAUSS_MU_RANGE,
config.DISP_GAUSS_SIG_SCALING) # TODO: uncomment
# ori_normed_disp_field_maps = np.zeros((config.DISP_MAP_COUNT, ori_image.shape[0], ori_image.shape[1], 2)) # TODO: remove
# # TODO: remove
# np.random.seed(seed=0)
# colors = np.random.randint(0, 255, size=(len(downsampling_factors), 3), dtype=np.uint8)
for index, downsampling_factor in enumerate(downsampling_factors):
print("downsampling_factor: {}".format(downsampling_factor))
# Downsample ground-truth
image, gt_polygons, normed_disp_field_maps = downsample_gt_data(ori_image, ori_gt_polygons, ori_normed_disp_field_maps, downsampling_factor)
spatial_shape = image.shape[:2]
# Random color
# image = np.tile(colors[index], reps=[image.shape[0], image.shape[1], 1]) # TODO: remove
# Draw gt polygon map
gt_polygon_map = polygon_utils.draw_polygon_map(gt_polygons, spatial_shape, fill=True, edges=True,
vertices=True)
# Generate final displacement
disp_polygons_list, disp_polygon_maps = generate_disp_data(normed_disp_field_maps, gt_polygons,
disp_max_abs_value, spatial_shape)
# Compress data
gt_polygons = [polygon.astype(np.float16) for polygon in gt_polygons]
disp_polygons_list = [[polygon.astype(np.float16) for polygon in polygons] for polygons in disp_polygons_list]
disp_field_maps = normed_disp_field_maps * 32767 # int16 max value = 32767
disp_field_maps = np.round(disp_field_maps)
disp_field_maps = disp_field_maps.astype(np.int16)
# Cut sample into patches
if include_polygons:
patches = process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map,
disp_field_maps, disp_polygon_maps,
gt_polygons, disp_polygons_list)
else:
patches = process_sample_into_patches(patch_stride, patch_res, image, gt_polygon_map,
disp_field_maps, disp_polygon_maps)
for patch in patches:
save_patch_to_tfrecord(patch, downsampling_factor_writers[downsampling_factor])
return True
def process_dataset(dataset_fold, dataset_raw_dirpath,
image_info_list, overwrite_polygon_dir_name, patch_stride, patch_res,
data_aug_rot,
downsampling_factors,
disp_max_abs_value):
print("Processing images from {}".format(dataset_raw_dirpath))
for image_index, image_info in enumerate(image_info_list):
print("Processing city {}. Progression: {}/{}"
.format(image_info["city"], image_index + 1, len(image_info_list)))
if "number" in image_info:
# This is one image
tile_info_list = [image_info]
elif "numbers" in image_info:
# This is multiple images
tile_info_list = [
{
"city": image_info["city"],
"number": number,
"min_downsampling_factor": image_info["min_downsampling_factor"],
}
for number in image_info["numbers"]
]
else:
print_utils.print_warning(
"WARNING: image_info dict should have one of those keys: \"number\" or \"numbers\"")
tile_info_list = []
for tile_info in tile_info_list:
image_name = read.IMAGE_NAME_FORMAT.format(city=tile_info["city"], number=tile_info["number"])
print("Processing city {}, number {}"
.format(tile_info["city"], tile_info["number"]))
include_polygons = (dataset_fold == "val" or dataset_fold == "test")
if data_aug_rot and dataset_fold == "train":
# Account for data augmentation when rotating patches on the training set
adjusted_patch_res = math.ceil(patch_res * math.sqrt(2))
adjusted_patch_stride = math.floor(
patch_stride * math.sqrt(
2) / 2) # Divided by 2 so that no pixels are left out when rotating by 45 degrees
else:
adjusted_patch_res = patch_res
adjusted_patch_stride = patch_stride
# Filter out downsampling_factors that are lower than city_min_downsampling_factor
image_downsampling_factors = [downsampling_factor for downsampling_factor in downsampling_factors if
tile_info["min_downsampling_factor"] <= downsampling_factor]
# Create writers
writers = {}
for downsampling_factor in downsampling_factors:
filename_format = os.path.join(config.TFRECORDS_DIR,
config.TFRECORD_FILEPATH_FORMAT.format(dataset_fold, image_name,
downsampling_factor))
shard_writer = dataset_utils.TFRecordShardWriter(filename_format, config.RECORDS_PER_SHARD)
writers[downsampling_factor] = shard_writer
process_image(dataset_raw_dirpath, tile_info, overwrite_polygon_dir_name,
adjusted_patch_stride, adjusted_patch_res,
image_downsampling_factors,
disp_max_abs_value,
include_polygons,
writers)
# Close writers
for downsampling_factor in downsampling_factors:
writers[downsampling_factor].close()
def save_metadata(meta_data_filepath, disp_max_abs_value, downsampling_factors):
data = {
"disp_max_abs_value": disp_max_abs_value,
"downsampling_factors": downsampling_factors,
}
with open(meta_data_filepath, 'w') as outfile:
json.dump(data, outfile)
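# With the values currently set in config_aerial_image_multires.py, metadata.txt would contain, for example:
# {"disp_max_abs_value": 4, "downsampling_factors": [1, 2, 4, 8, 16]}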
def main():
random.seed(0)
    # Create dataset tfrecords directory if it does not exist
if not os.path.exists(config.TFRECORDS_DIR):
os.makedirs(config.TFRECORDS_DIR)
# Save meta-data
meta_data_filepath = os.path.join(config.TFRECORDS_DIR, "metadata.txt")
save_metadata(meta_data_filepath, config.DISP_MAX_ABS_VALUE, config.DOWNSAMPLING_FACTORS)
# Save data
process_dataset("train",
config.DATASET_RAW_DIRPATH,
config.TRAIN_IMAGES,
config.DATASET_OVERWRITE_POLYGON_DIR_NAME,
config.TILE_STRIDE,
config.TILE_RES,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
process_dataset("val",
config.DATASET_RAW_DIRPATH,
config.VAL_IMAGES,
config.DATASET_OVERWRITE_POLYGON_DIR_NAME,
config.TILE_STRIDE,
config.TILE_RES,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
process_dataset("test",
config.DATASET_RAW_DIRPATH,
config.TEST_IMAGES,
config.DATASET_OVERWRITE_POLYGON_DIR_NAME,
config.TILE_STRIDE,
config.TILE_RES,
config.DATA_AUG_ROT,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
if __name__ == "__main__":
main()
| 16,812 | 46.360563 | 182 | py |
mapalignment | mapalignment-master/projects/mapalign/dataset_utils/config_mapping_challenge_multires.py | import os
def choose_first_existing_dir(dir_list):
for dir in dir_list:
if os.path.exists(dir):
return dir
return None
ROOT_DIR = choose_first_existing_dir([
"/local/shared/epitome-polygon-deep-learning", # Inria cluster node nefgpu23
"/home/nigirard/epitome-polygon-deep-learning", # Landsat
"/workspace", # Docker (mainly when using Deepsat or personal computer)
])
print("ROOT_DIR: {}".format(ROOT_DIR))
# Dataset offline pre-processing
DATASET_DIRPATH = os.path.join(ROOT_DIR, "data/mapping_challenge_dataset")
DATASET_RAW_DIRPATH = os.path.join(DATASET_DIRPATH, "raw")
TFRECORDS_DIR = os.path.join(DATASET_DIRPATH, "tfrecords.mapalign.multires")
TFRECORD_FILENAME_FORMAT = "{}.ds_fac_{:02d}.{{:06d}}.tfrecord"
DISP_GLOBAL_SHAPE = (5000, 5000) # As Aerial Inria Dataset images
DISP_PATCH_RES = 300
DISP_MAP_COUNT = 1 # Number of displacement applied to polygons to generate to the displaced gt map (1 minimum, more for dataset augmentation)
DISP_MODES = 30 # Number of Gaussians mixed up to make the displacement map
DISP_GAUSS_MU_RANGE = [0, 1] # Coordinates are normalized to [0, 1] before the function is applied
DISP_GAUSS_SIG_SCALING = [0.0, 0.002] # Coordinates are normalized to [0, 1] before the function is applied
DISP_MAX_ABS_VALUE = 4
# DISP_MAP_MARGIN = 100 # Some polygons are slightly outside the image, this margin allows to take them into account
# Tile generation
REFERENCE_PIXEL_SIZE = 0.3 # In meters.
DOWNSAMPLING_FACTORS = [1, 2, 4, 8]
# The resulting pixel sizes will be equal to [REFERENCE_PIXEL_SIZE*DOWNSAMPLING_FACTOR for DOWNSAMPLING_FACTOR in DOWNSAMPLING_FACTORS]
# Split data into TRAIN and VAL with the already-made split of the data
# Split large tfrecord into several smaller tfrecords (shards)
RECORDS_PER_SHARD = 100
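# Example (illustrative only): TFRECORD_FILENAME_FORMAT.format("train", 2) gives
# "train.ds_fac_02.{:06d}.tfrecord"; shard 0 of that resolution is then "train.ds_fac_02.000000.tfrecord".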
| 1,817 | 41.27907 | 143 | py |
mapalignment | mapalignment-master/projects/mapalign/dataset_utils/config_aerial_image_multires.py | import os
def choose_first_existing_dir(dir_list):
for dir in dir_list:
if os.path.exists(dir):
return dir
return None
ROOT_DIR = choose_first_existing_dir([
"/local/shared/epitome-polygon-deep-learning", # Inria cluster node nefgpu23
"/home/nigirard/epitome-polygon-deep-learning", # Landsat
"/workspace", # Docker (mainly when using Deepsat or personal computer)
])
print("ROOT_DIR: {}".format(ROOT_DIR))
# Dataset offline pre-processing
DATASET_DIRPATH = os.path.join(ROOT_DIR, "data/AerialImageDataset")
DATASET_RAW_DIRPATH = os.path.join(DATASET_DIRPATH, "raw")
DATASET_OVERWRITE_POLYGON_DIR_NAME = None # Can be "aligned_noisy_gt_polygons_1"
TILE_RES = 220 # The maximum patch size will be 220. Adjusted for rotation it will be ceil(220*sqrt(2)) = 312.
TILE_STRIDE = 100 # The maximum inner patch res will be 100
# If True, generates patches with increased size to account for cropping during the online processing step
DATA_AUG_ROT = True # data_aug_rot only applies to train
TFRECORDS_DIR = os.path.join(DATASET_DIRPATH, "tfrecords.mapalign.multires.aligned_noisy_1")
TFRECORD_FILEPATH_FORMAT = "{}/{}/ds_fac_{:02d}.{{:06d}}.tfrecord" # Fold, image name, ds_fac, shard number
DISP_MAP_COUNT = 1 # Number of displacement applied to polygons to generate the displaced gt map (1 minimum, more for dataset augmentation)
DISP_MODES = 30 # Number of Gaussians mixed up to make the displacement map (Default: 20)
DISP_GAUSS_MU_RANGE = [0, 1] # Coordinates are normalized to [0, 1] before the function is applied
DISP_GAUSS_SIG_SCALING = [0.0, 0.002] # Coordinates are normalized to [0, 1] before the function is applied
DISP_MAX_ABS_VALUE = 4 # In pixels in the downsampled resolutions.
# DISP_MAX_ABS_VALUE = 2 # In pixels in the downsampled resolutions.
# Tile generation
DOWNSAMPLING_FACTORS = [1, 2, 4, 8, 16]
# For 16, 5000x5000px images will be rescaled to 312x312px, which corresponds to the rotation-adjusted tile_res
TRAIN_IMAGES = [
{
"city": "bloomington",
"numbers": list(range(1, 37)),
"min_downsampling_factor": 1, # Default: 4
},
{
"city": "bellingham",
"numbers": list(range(1, 37)),
"min_downsampling_factor": 1, # Default: 4
},
{
"city": "innsbruck",
"numbers": list(range(1, 37)),
"min_downsampling_factor": 1, # Default: 2
},
{
"city": "sfo",
"numbers": list(range(1, 37)),
"min_downsampling_factor": 1, # Default: 4
},
{
"city": "tyrol-e",
"numbers": list(range(1, 37)),
"min_downsampling_factor": 1, # Default: 4
},
{
"city": "austin",
"numbers": list(range(1, 37)),
"min_downsampling_factor": 1, # Default: 1
},
{
"city": "chicago",
"numbers": list(range(1, 37)),
"min_downsampling_factor": 1, # Default: 1
},
{
"city": "kitsap",
"numbers": list(range(1, 37)),
"min_downsampling_factor": 1, # Default: 4
},
{
"city": "tyrol-w",
"numbers": list(range(1, 37)),
"min_downsampling_factor": 1, # Default: 2
},
{
"city": "vienna",
"numbers": list(range(1, 37)),
"min_downsampling_factor": 1, # Default: 2
},
]
VAL_IMAGES = []
TEST_IMAGES = []
# Split large tfrecord into several smaller tfrecords (shards)
RECORDS_PER_SHARD = 100
| 3,439 | 33.747475 | 140 | py |
mapalignment | mapalignment-master/projects/mapalign/dataset_utils/preprocess_mapping_challenge_multires.py | import sys
import os
import json
import skimage.transform
import skimage.draw
import numpy as np
# from PIL import Image, ImageDraw
# Image.MAX_IMAGE_PIXELS = 200000000
import tensorflow as tf
import config_mapping_challenge_multires as config
sys.path.append("../../../data/mapping_challenge_dataset")
import read
# sys.path.append("../utils")
# import visualization
sys.path.append("../../utils")
import tf_utils
import polygon_utils
import image_utils
# import python_utils
import math_utils
import dataset_utils
# if python_utils.module_exists("matplotlib.pyplot"):
# import matplotlib.pyplot as plt
def downsample_gt_data(image, metadata, gt_polygons, normed_disp_field_maps, downsampling_factor):
# First, correct the downsampling_factor so that:
# A downsampling_factor of 1 results in a final pixel_size equal to config.REFERENCE_PIXEL_SIZE
# A downsampling_factor of 2 results in a final pixel_size equal to 2 * config.REFERENCE_PIXEL_SIZE
corrected_downsampling_factor = downsampling_factor * config.REFERENCE_PIXEL_SIZE / metadata["pixelsize"]
scale = 1 / corrected_downsampling_factor
downsampled_image = skimage.transform.rescale(image, scale, order=3, preserve_range=True, multichannel=True)
downsampled_image = downsampled_image.astype(image.dtype)
downsampled_gt_polygons = polygon_utils.rescale_polygon(gt_polygons, scale)
downsampled_normed_disp_field_maps = np.empty((normed_disp_field_maps.shape[0],
round(normed_disp_field_maps.shape[1] / corrected_downsampling_factor),
round(normed_disp_field_maps.shape[2] / corrected_downsampling_factor),
normed_disp_field_maps.shape[3]))
for i in range(normed_disp_field_maps.shape[0]):
downsampled_normed_disp_field_maps[i] = skimage.transform.rescale(normed_disp_field_maps[i],
scale, order=3,
preserve_range=True, multichannel=True)
return downsampled_image, downsampled_gt_polygons, downsampled_normed_disp_field_maps
def generate_disp_data(normed_disp_field_maps, gt_polygons, disp_max_abs_value, spatial_shape):
scaled_disp_field_maps = normed_disp_field_maps * disp_max_abs_value
disp_polygons_list = polygon_utils.apply_displacement_fields_to_polygons(gt_polygons,
scaled_disp_field_maps)
disp_polygon_maps = polygon_utils.draw_polygon_maps(disp_polygons_list, spatial_shape, fill=True,
edges=True, vertices=True)
return disp_polygons_list, disp_polygon_maps
def save_patch_to_tfrecord(patch, shard_writer):
# print(patch["disp_field_maps"].min() / 2147483647, patch["disp_field_maps"].max() / 2147483647)
# visualization.plot_field_map("disp_field_map", patch["disp_field_maps"][0])
# Compress image into jpg
image_raw = image_utils.convert_array_to_jpg_bytes(patch["image"], mode="RGB")
gt_polygon_map_raw = patch["gt_polygon_map"].tostring() # TODO: convert to png
disp_field_maps_raw = patch["disp_field_maps"].tostring()
disp_polygon_maps_raw = patch[
"disp_polygon_maps"].tostring() # TODO: convert to png (have to handle several png images...)
if patch["gt_polygons"] is not None and patch["disp_polygons"] is not None:
gt_polygons_raw = patch["gt_polygons"].tostring()
disp_polygons_raw = patch["disp_polygons"].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'tile_res': tf_utils.int64_feature(patch["tile_res"]),
'disp_map_count': tf_utils.int64_feature(patch["disp_map_count"]),
'image': tf_utils.bytes_feature(image_raw),
'gt_polygon_count': tf_utils.int64_feature(patch["gt_polygons"].shape[0]),
'gt_polygon_length': tf_utils.int64_feature(patch["gt_polygons"].shape[1]),
'gt_polygons': tf_utils.bytes_feature(gt_polygons_raw),
'disp_polygons': tf_utils.bytes_feature(disp_polygons_raw),
'gt_polygon_map': tf_utils.bytes_feature(gt_polygon_map_raw),
'disp_field_maps': tf_utils.bytes_feature(disp_field_maps_raw),
'disp_polygon_maps': tf_utils.bytes_feature(disp_polygon_maps_raw)
}))
else:
example = tf.train.Example(features=tf.train.Features(feature={
'tile_res': tf_utils.int64_feature(patch["tile_res"]),
'disp_map_count': tf_utils.int64_feature(patch["disp_map_count"]),
'image': tf_utils.bytes_feature(image_raw),
'gt_polygon_map': tf_utils.bytes_feature(gt_polygon_map_raw),
'disp_field_maps': tf_utils.bytes_feature(disp_field_maps_raw),
'disp_polygon_maps': tf_utils.bytes_feature(disp_polygon_maps_raw),
}))
shard_writer.write(example.SerializeToString())
def process_image(reader, image_id, downsampling_factors, disp_field_maps_patch_creator, disp_max_abs_value,
include_polygons,
downsampling_factor_writers):
"""
    Writes to all the writers (one for each resolution) all sample patches extracted from the image identified by image_id.
:param reader:
:param image_id:
:param downsampling_factors:
:param disp_field_maps_patch_creator:
:param disp_max_abs_value:
:param include_polygons:
:param downsampling_factor_writers:
:return:
"""
ori_image, ori_metadata, ori_gt_polygons = reader.load_gt_data(image_id)
ori_gt_polygons = polygon_utils.polygons_remove_holes(ori_gt_polygons) # TODO: Remove
# Remove redundant vertices
ori_gt_polygons = polygon_utils.simplify_polygons(ori_gt_polygons, tolerance=1)
# visualization.init_figures(["gt_data"], figsize=(60, 40))
# visualization.plot_example_polygons("gt_data", ori_image, ori_gt_polygons)
# Get displacement maps
ori_normed_disp_field_maps = disp_field_maps_patch_creator.get_patch()
# ori_normed_disp_field_maps = np.zeros((config.DISP_MAP_COUNT, ori_image.shape[0], ori_image.shape[1], 2)) # TODO: remove
# # TODO: remove
# np.random.seed(seed=0)
# colors = np.random.randint(0, 255, size=(len(downsampling_factors), 3), dtype=np.uint8)
for index, downsampling_factor in enumerate(downsampling_factors):
# print("downsampling_factor: {}".format(downsampling_factor))
# Downsample ground-truth
image, gt_polygons, normed_disp_field_maps = downsample_gt_data(ori_image, ori_metadata, ori_gt_polygons,
ori_normed_disp_field_maps, downsampling_factor)
spatial_shape = image.shape[:2]
# Random color
# image = np.tile(colors[index], reps=[image.shape[0], image.shape[1], 1]) # TODO: remove
# Draw gt polygon map
gt_polygon_map = polygon_utils.draw_polygon_map(gt_polygons, spatial_shape, fill=True, edges=True,
vertices=True)
# Generate final displacement
disp_polygons_list, disp_polygon_maps = generate_disp_data(normed_disp_field_maps, gt_polygons,
disp_max_abs_value, spatial_shape)
        # Comparing against np.nan with == is always False; use np.isnan() to actually detect NaN coordinates
        if np.isnan(gt_polygons[0][0][0]) or np.isnan(gt_polygons[0][0][1]):
            print(gt_polygons[0][0])
        if np.isnan(disp_polygons_list[0][0][0][0]) or np.isnan(disp_polygons_list[0][0][0][1]):
            print("disp_polygons_list:")
            print(disp_polygons_list[0][0])
# Compress data
gt_polygons = [polygon.astype(np.float16) for polygon in gt_polygons]
disp_polygons_list = [[polygon.astype(np.float16) for polygon in polygons] for polygons in disp_polygons_list]
disp_field_maps = normed_disp_field_maps * 32767 # int16 max value = 32767
disp_field_maps = np.round(disp_field_maps)
disp_field_maps = disp_field_maps.astype(np.int16)
if include_polygons:
gt_polygons, \
disp_polygons_array = polygon_utils.prepare_polygons_for_tfrecord(gt_polygons, disp_polygons_list)
else:
gt_polygons = disp_polygons_array = None
assert image.shape[0] == image.shape[1], "image should be square otherwise tile_res cannot be defined"
tile_res = image.shape[0]
disp_map_count = disp_polygon_maps.shape[0]
patch = {
"tile_res": tile_res,
"disp_map_count": disp_map_count,
"image": image,
"gt_polygons": gt_polygons,
"disp_polygons": disp_polygons_array,
"gt_polygon_map": gt_polygon_map,
"disp_field_maps": disp_field_maps,
"disp_polygon_maps": disp_polygon_maps,
}
save_patch_to_tfrecord(patch, downsampling_factor_writers[downsampling_factor])
return True
def process_dataset(dataset_fold,
dataset_raw_dirpath,
downsampling_factors,
disp_max_abs_value):
print("Processing images from {}".format(dataset_raw_dirpath))
# Create shard writers
shard_writers = {}
for downsampling_factor in downsampling_factors:
filename_format = os.path.join(config.TFRECORDS_DIR,
config.TFRECORD_FILENAME_FORMAT.format(dataset_fold, downsampling_factor))
shard_writer = dataset_utils.TFRecordShardWriter(filename_format, config.RECORDS_PER_SHARD)
shard_writers[downsampling_factor] = shard_writer
# Create reader
reader = read.Reader(dataset_raw_dirpath, dataset_fold)
# Create DispFieldMapsPatchCreator
disp_field_maps_patch_creator = math_utils.DispFieldMapsPatchCreator(config.DISP_GLOBAL_SHAPE, config.DISP_PATCH_RES, config.DISP_MAP_COUNT, config.DISP_MODES, config.DISP_GAUSS_MU_RANGE, config.DISP_GAUSS_SIG_SCALING)
for image_index, image_id in enumerate(reader.image_id_list):
if (image_index + 1) % 10 == 0:
print("Processing progression: {}/{}"
.format(image_index + 1, len(reader.image_id_list)))
include_polygons = (dataset_fold == "val" or dataset_fold == "test")
process_image(reader, image_id,
downsampling_factors,
disp_field_maps_patch_creator,
disp_max_abs_value,
include_polygons,
shard_writers)
# Close writers
for downsampling_factor in downsampling_factors:
shard_writers[downsampling_factor].close()
def save_metadata(meta_data_filepath, disp_max_abs_value, downsampling_factors):
data = {
"disp_max_abs_value": disp_max_abs_value,
"downsampling_factors": downsampling_factors,
}
with open(meta_data_filepath, 'w') as outfile:
json.dump(data, outfile)
def main():
# input("Prepare dataset, overwrites previous data. This can take a while (1h), press <Enter> to continue...")
    # Create dataset tfrecords directory if it does not exist
if not os.path.exists(config.TFRECORDS_DIR):
os.makedirs(config.TFRECORDS_DIR)
# Save meta-data
meta_data_filepath = os.path.join(config.TFRECORDS_DIR, "metadata.txt")
save_metadata(meta_data_filepath, config.DISP_MAX_ABS_VALUE,
config.DOWNSAMPLING_FACTORS)
process_dataset("train",
config.DATASET_RAW_DIRPATH,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
process_dataset("val",
config.DATASET_RAW_DIRPATH,
config.DOWNSAMPLING_FACTORS,
config.DISP_MAX_ABS_VALUE)
if __name__ == "__main__":
main()
| 11,965 | 43.816479 | 222 | py |
mapalignment | mapalignment-master/projects/mapalign/dataset_utils/dataset_multires.py | import sys
import os
import math
import tensorflow as tf
sys.path.append("../utils") # Mapalign sub-projects utils
import visualization
import skimage.io
sys.path.append("../../utils") # Projects utils
import tf_utils
import python_utils
# --- Param --- #
STRING_QUEUE_CAPACITY = 4000
MIN_QUEUE_EXAMPLES = 2000
# --- --- #
def all_items_are_integers(l):
result = True
for i in l:
if type(i) is not int:
result = False
break
return result
def get_all_shards(shard_filepath_format):
shard_filepath_list = []
shard_index = 0
stop = False
while not stop:
shard_filepath = shard_filepath_format.format(shard_index)
if os.path.exists(shard_filepath):
shard_filepath_list.append(shard_filepath)
shard_index += 1
else:
stop = True
return shard_filepath_list
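# Usage sketch (hypothetical paths): get_all_shards("train/austin1/ds_fac_01.{:06d}.tfrecord") returns
# ["train/austin1/ds_fac_01.000000.tfrecord", "train/austin1/ds_fac_01.000001.tfrecord", ...] and stops
# at the first shard index whose file does not exist.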
def create_dataset_filename_list(tfrecords_dir_list, tfrecord_filename_format, downsampling_factors, dataset="train",
resolution_file_repeats=None):
if resolution_file_repeats is None:
resolution_file_repeats = [1] * len(downsampling_factors)
assert len(downsampling_factors) == len(resolution_file_repeats), \
"Downsampling_factors and sample_resolution_prob_weights must have the same number of elements"
assert all_items_are_integers(resolution_file_repeats), "All repeat count should be integers"
dataset_filename_list = []
for tfrecords_dir in tfrecords_dir_list:
# Find dataset dir
dataset_dir = os.path.join(tfrecords_dir, dataset)
# Find all images in dataset dir
image_dir_name_list = os.listdir(dataset_dir)
for image_dir_name in image_dir_name_list:
image_dir = os.path.join(dataset_dir, image_dir_name)
for downsampling_factor, resolution_file_repeat in zip(downsampling_factors, resolution_file_repeats):
shard_filepath_format = os.path.join(image_dir, tfrecord_filename_format.format(downsampling_factor))
shard_filepath_list = get_all_shards(shard_filepath_format)
repeated_filepaths = shard_filepath_list * resolution_file_repeat # Repeat filepaths
dataset_filename_list.extend(repeated_filepaths)
return dataset_filename_list
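# Usage sketch (mirroring the commented-out values in main() below): repeating shard paths is how coarser
# resolutions get over-sampled, e.g.
# create_dataset_filename_list(tfrecords_dir_list, "ds_fac_{:02d}.{{:06d}}.tfrecord",
#                              [1, 2, 4, 8], dataset="train", resolution_file_repeats=[1, 4, 16, 64])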
def rotate_poly_map(poly_map, angle):
# Apply NEAREST to corner channels and BILINEAR to the others
gt_polygon_map_area, gt_polygon_map_edges, gt_polygon_corners = tf.unstack(poly_map, axis=-1)
gt_polygon_map_area = tf.contrib.image.rotate(gt_polygon_map_area, angle, interpolation='BILINEAR')
gt_polygon_map_edges = tf.contrib.image.rotate(gt_polygon_map_edges, angle, interpolation='BILINEAR')
gt_polygon_corners = tf.contrib.image.rotate(gt_polygon_corners, angle, interpolation='NEAREST')
poly_map = tf.stack([gt_polygon_map_area, gt_polygon_map_edges, gt_polygon_corners], axis=-1)
return poly_map
def rotate_field_vectors(field_map, angle):
"""
    Rotates every vector of field_map by angle. Does not rotate the spatial support (which is rotated in rotate_poly_map()).
:param field_map:
:param angle: (in rad.)
:return:
"""
field_map_shape = tf.shape(field_map) # Save shape for later reshape
tile_resfield_map = tf.reshape(field_map, [-1, 2]) # Convert to a list of vectors
rot_mat = tf.cast(tf.stack([(tf.cos(-angle), -tf.sin(-angle)), (tf.sin(-angle), tf.cos(-angle))], axis=0),
tf.float32)
tile_resfield_map = tf.cast(tile_resfield_map, tf.float32)
tile_resfield_map = tf.matmul(tile_resfield_map, rot_mat)
tile_resfield_map = tf.reshape(tile_resfield_map,
field_map_shape) # Reshape back to field of vectors
return tile_resfield_map
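# Worked example (values only, shapes unchanged): with angle = pi/2 the matrix rot_mat above is
# [[0, 1], [-1, 0]], so a per-pixel vector (row=1, col=0) is mapped to (0, 1).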
def crop_or_pad_many(image_list, res):
assert type(res) == int, "type(res) should be int"
image_batch = tf.stack(image_list, axis=0)
cropped_image_batch = tf.image.resize_image_with_crop_or_pad(image=image_batch, target_height=res, target_width=res)
cropped_image_list = tf.unstack(cropped_image_batch, axis=0)
return cropped_image_list
def corners_in_inner_patch(poly_map, patch_inner_res):
cropped_disp_polygon_map = tf.image.resize_image_with_crop_or_pad(image=poly_map,
target_height=patch_inner_res,
target_width=patch_inner_res)
_, _, disp_polygon_map_corners = tf.unstack(cropped_disp_polygon_map, axis=-1)
result = tf.cast(tf.reduce_sum(disp_polygon_map_corners), dtype=tf.bool)
return result
def field_map_flip_up_down(field_map):
field_map = tf.image.flip_up_down(field_map)
field_map_row, field_map_col = tf.unstack(field_map, axis=-1)
field_map = tf.stack([-field_map_row, field_map_col], axis=-1)
return field_map
def drop_components(polygon_map, keep_poly_prob, seed=None):
"""
Randomly removes some connected components from polygon_map (which amounts to removing some polygons).
    :param polygon_map: The polygon map raster to filter
    :param keep_poly_prob: Probability that a polygon is kept
:param seed:
:return:
"""
if keep_poly_prob == 1:
# Keep all
return polygon_map
elif keep_poly_prob == 0:
# Remove all
zeroed_polygon_map_zeros = tf.zeros_like(polygon_map)
return zeroed_polygon_map_zeros
try:
with tf.name_scope('drop_components'):
# Compute connected components on the first channel of polygon_map (the polygon fill channel):
connected_components = tf.contrib.image.connected_components(polygon_map[:, :, 0])
# Get maximum component label:
connected_component_max = tf.reduce_max(connected_components)
# Randomize component labels (but keep the background label "0" the same):
connected_components_shape = tf.shape(connected_components)
connected_components = tf.reshape(connected_components, [-1])
## --- Keep a polygon with probability keep_poly_prob --- ##
random_values = tf.random_uniform((connected_component_max,), dtype=tf.float32,
seed=seed) # Don't draw a random number for the background label 0.
random_values = tf.pad(random_values, [[1, 0]], "CONSTANT",
constant_values=1) # Add 1 at the beginning of the array so that the background has a zero probability to be kept
connected_component_random_values = tf.gather(random_values, connected_components)
connected_component_random_values = tf.reshape(connected_component_random_values,
connected_components_shape)
# Threshold randomized components:
mask = tf.expand_dims(
tf.cast(
tf.less(connected_component_random_values, keep_poly_prob),
dtype=tf.float32
),
axis=-1)
# Filter polygon_map with mask:
mask = tf_utils.dilate(mask, filter_size=3) # Dilate to take polygon outlines inside the mask
masked_polygon_map = mask * polygon_map
return masked_polygon_map
except AttributeError:
print(
"WARNING: Tensorflow {} does not have connected_components() implemented. Keeping all components regardless of keep_poly_prob.".format(
tf.__version__))
return polygon_map
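# Usage sketch (keep_poly_prob=0.1 is the value used in main() below): inside the input pipeline roughly
# 90% of the connected components (i.e. polygons) of the rasterised map get zeroed out, e.g.
# disp_polygon_map = drop_components(disp_polygon_map, keep_poly_prob=0.1, seed=seed)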
def read_and_decode(tfrecord_filepaths, patch_inner_res, patch_outer_res, batch_size,
dynamic_range, disp_map_dynamic_range_fac=0.5, keep_poly_prob=None, data_aug=False, train=True,
seed=None):
"""
Reads examples from the tfrecord.
If train = True, polygon data will not be served as it cannot be shuffled easily (varying-sized tensors).
Set to False for validation and test only (where shuffling does not matter)
:param tfrecord_filepaths:
:param patch_inner_res:
:param patch_outer_res:
:param batch_size:
:param dynamic_range:
:param disp_map_dynamic_range_fac:
:param keep_poly_prob: If not None, the fraction of disp_polygon that are kept
:param data_aug:
:param train:
:return:
"""
assert 0 < len(tfrecord_filepaths), "tfrecord_filepaths should contain at least one element"
with tf.name_scope('read_and_decode'):
filename_queue = tf.train.string_input_producer(tfrecord_filepaths, shuffle=True, seed=seed, capacity=STRING_QUEUE_CAPACITY + 3 * batch_size)
# reader_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)
# reader = tf.TFRecordReader(options=reader_options)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
if train:
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'tile_res': tf.FixedLenFeature([], tf.int64),
'disp_map_count': tf.FixedLenFeature([], tf.int64),
'image': tf.FixedLenFeature([], tf.string),
'gt_polygon_map': tf.FixedLenFeature([], tf.string),
'disp_field_maps': tf.FixedLenFeature([], tf.string),
'disp_polygon_maps': tf.FixedLenFeature([], tf.string)
})
disp_map_count = tf.cast(features['disp_map_count'], tf.int64)
gt_polygons = None
disp_polygons_array = None
else:
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'tile_res': tf.FixedLenFeature([], tf.int64),
'disp_map_count': tf.FixedLenFeature([], tf.int64),
'image': tf.FixedLenFeature([], tf.string),
'gt_polygon_count': tf.FixedLenFeature([], tf.int64),
'gt_polygon_length': tf.FixedLenFeature([], tf.int64),
'gt_polygons': tf.FixedLenFeature([], tf.string),
'disp_polygons': tf.FixedLenFeature([], tf.string),
'gt_polygon_map': tf.FixedLenFeature([], tf.string),
'disp_field_maps': tf.FixedLenFeature([], tf.string),
'disp_polygon_maps': tf.FixedLenFeature([], tf.string)
})
disp_map_count = tf.cast(features['disp_map_count'], tf.int64)
gt_polygon_count = tf.cast(features['gt_polygon_count'], tf.int64)
gt_polygon_length = tf.cast(features['gt_polygon_length'], tf.int64)
gt_polygons_flat = tf.decode_raw(features['gt_polygons'], tf.float16)
disp_polygons_flat = tf.decode_raw(features['disp_polygons'], tf.float16)
gt_polygons_shape = tf.stack([gt_polygon_count, gt_polygon_length, 2])
gt_polygons = tf.reshape(gt_polygons_flat, gt_polygons_shape)
disp_polygons_shape = tf.stack([disp_map_count, gt_polygon_count, gt_polygon_length, 2])
disp_polygons_array = tf.reshape(disp_polygons_flat, disp_polygons_shape)
tile_res = tf.cast(features['tile_res'], tf.int64)
image_flat = tf.image.decode_jpeg(features['image']) # TODO: use dct_method="INTEGER_ACCURATE"?
gt_polygon_map_flat = tf.decode_raw(features['gt_polygon_map'], tf.uint8)
disp_field_maps_flat = tf.decode_raw(features['disp_field_maps'], tf.int16)
disp_polygon_maps_flat = tf.decode_raw(features['disp_polygon_maps'], tf.uint8)
# return image_flat, None, None, gt_polygon_map_flat, disp_field_maps_flat, disp_polygon_maps_flat
# Reshape tensors
image_shape = tf.stack([tile_res, tile_res, 3])
gt_polygon_map_shape = tf.stack([tile_res, tile_res, 3])
disp_field_maps_shape = tf.stack([disp_map_count, tile_res, tile_res, 2])
disp_polygon_maps_shape = tf.stack([disp_map_count, tile_res, tile_res, 3])
image = tf.reshape(image_flat, image_shape)
gt_polygon_map = tf.reshape(gt_polygon_map_flat, gt_polygon_map_shape)
disp_field_maps = tf.reshape(disp_field_maps_flat, disp_field_maps_shape)
disp_polygon_maps = tf.reshape(disp_polygon_maps_flat, disp_polygon_maps_shape)
# return image, None, None, gt_polygon_map, disp_field_maps, disp_polygon_maps
# Choose disp map:
disp_map_index = tf.random_uniform([], maxval=disp_map_count, dtype=tf.int64, seed=seed)
disp_polygons = None
if not train:
disp_polygons = disp_polygons_array[disp_map_index, :, :, :]
disp_field_map = disp_field_maps[disp_map_index, :, :, :]
disp_polygon_map = disp_polygon_maps[disp_map_index, :, :, :]
# return image, None, None, gt_polygon_map, tf.expand_dims(disp_field_map, axis=0), tf.expand_dims(disp_polygon_map, axis=0)
# Normalize data
image = image / 255
gt_polygon_map = gt_polygon_map / 255
disp_polygon_map = disp_polygon_map / 255
disp_field_map = disp_map_dynamic_range_fac * tf.cast(disp_field_map,
dtype=tf.float32) / 32767 # Within [-disp_map_dynamic_range_fac, disp_map_dynamic_range_fac]
if keep_poly_prob is not None:
# Remove some polygons from disp_polygon_map
disp_polygon_map = drop_components(disp_polygon_map, keep_poly_prob, seed=seed)
# return tf.expand_dims(image, axis=0), gt_polygons, disp_polygons, tf.expand_dims(gt_polygon_map, axis=0), tf.expand_dims(disp_field_map, axis=0), tf.expand_dims(disp_polygon_map, axis=0)
# Perturb image brightness, contrast, saturation, etc.
if data_aug:
image = tf.image.random_brightness(image, 0.25)
image = tf.image.random_contrast(image, 0.8, 1.2)
image = tf.image.random_saturation(image, 0.8, 1.2)
# Rotate
if train and data_aug: # data_aug rototation only applies to train (val includes polygons that should be also rotated if you want to augment val as well)
# Pad to avoid losing parts of the image after rotation
rot_patch_outer_res = int(math.ceil(patch_outer_res * math.sqrt(2)))
rot_patch_inner_res = int(math.ceil(patch_inner_res * math.sqrt(2)))
image, gt_polygon_map, disp_polygon_map = crop_or_pad_many([image, gt_polygon_map, disp_polygon_map],
rot_patch_outer_res)
disp_field_map = tf.image.resize_image_with_crop_or_pad(
image=disp_field_map,
target_height=rot_patch_inner_res,
target_width=rot_patch_inner_res)
# Apply the rotations on the spatial support
angle = tf.random_uniform([], maxval=2 * math.pi, dtype=tf.float32, seed=seed)
image = tf.contrib.image.rotate(image, angle, interpolation='BILINEAR')
gt_polygon_map = rotate_poly_map(gt_polygon_map, angle)
disp_polygon_map = rotate_poly_map(disp_polygon_map, angle)
disp_field_map = tf.contrib.image.rotate(disp_field_map, angle, interpolation='BILINEAR')
# Rotate only the vectors for every pixel of disp_field_map
disp_field_map = rotate_field_vectors(disp_field_map, angle)
# Crop to final patch_res
# patch_outer_res = 312
image, gt_polygon_map, disp_polygon_map = crop_or_pad_many([image, gt_polygon_map, disp_polygon_map],
patch_outer_res)
disp_field_map = tf.image.resize_image_with_crop_or_pad(
image=disp_field_map,
target_height=patch_inner_res,
target_width=patch_inner_res)
# Shift dynamic range of image to be in [-1, 1]
image = image * (dynamic_range[1] - dynamic_range[0]) + dynamic_range[0]
image = tf.clip_by_value(image, dynamic_range[0], dynamic_range[1])
# return image, gt_polygons, disp_polygons, gt_polygon_map, disp_field_map, disp_polygon_map
# # Dilate polygon maps
# gt_polygon_map = tf_utils.dilate(gt_polygon_map, filter_size=2)
# disp_polygon_map = tf_utils.dilate(disp_polygon_map, filter_size=2)
if data_aug:
# Apply random flips
flip = tf.random_uniform([], dtype=tf.float16, seed=seed)
flip_outputs = tf.cond(0.5 <= flip,
lambda: (tf.image.flip_up_down(image),
tf.image.flip_up_down(gt_polygon_map),
field_map_flip_up_down(disp_field_map),
tf.image.flip_up_down(disp_polygon_map)),
lambda: (image, gt_polygon_map, disp_field_map, disp_polygon_map))
image, gt_polygon_map, disp_field_map, disp_polygon_map = flip_outputs
# Add batch dimension (to be able to use enqueue_many=True)
image = tf.expand_dims(image, 0)
if not train:
gt_polygons = tf.expand_dims(gt_polygons, 0)
disp_polygons = tf.expand_dims(disp_polygons, 0)
gt_polygon_map = tf.expand_dims(gt_polygon_map, 0)
disp_field_map = tf.expand_dims(disp_field_map, 0)
disp_polygon_map = tf.expand_dims(disp_polygon_map, 0)
# Remove patches with too little data for training (that have no corners in inner patch)
include_patch = corners_in_inner_patch(gt_polygon_map, patch_inner_res)
empty = tf.constant([], tf.int32)
if train:
image, \
gt_polygon_map, \
disp_field_map, \
disp_polygon_map = tf.cond(include_patch,
lambda: [image, gt_polygon_map,
disp_field_map, disp_polygon_map],
lambda: [tf.gather(image, empty),
tf.gather(gt_polygon_map, empty),
tf.gather(disp_field_map, empty),
tf.gather(disp_polygon_map, empty)])
else:
image, \
gt_polygons, \
disp_polygons, \
gt_polygon_map, \
disp_field_map, \
disp_polygon_map = tf.cond(include_patch,
lambda: [image, gt_polygons, disp_polygons, gt_polygon_map, disp_field_map,
disp_polygon_map],
lambda: [
tf.gather(image, empty),
tf.gather(gt_polygons, empty),
tf.gather(disp_polygons, empty),
tf.gather(gt_polygon_map, empty),
tf.gather(disp_field_map, empty),
tf.gather(disp_polygon_map, empty)])
if train:
image_batch, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch = tf.train.shuffle_batch(
[image, gt_polygon_map, disp_field_map, disp_polygon_map],
batch_size=batch_size,
capacity=MIN_QUEUE_EXAMPLES + 3 * batch_size,
min_after_dequeue=MIN_QUEUE_EXAMPLES,
num_threads=8,
seed=seed,
enqueue_many=True,
allow_smaller_final_batch=False)
return image_batch, None, None, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch
else:
image_batch, gt_polygons_batch, disp_polygons_batch, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch = tf.train.batch(
[image, gt_polygons, disp_polygons, gt_polygon_map, disp_field_map, disp_polygon_map],
batch_size=batch_size,
num_threads=8,
dynamic_pad=True,
enqueue_many=True,
allow_smaller_final_batch=False)
return image_batch, gt_polygons_batch, disp_polygons_batch, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch
def main():
# --- Params --- #
seed = 0
data_dir = python_utils.choose_first_existing_path([
"/local/shared/epitome-polygon-deep-learning/data", # Try local node first
"/home/nigirard/epitome-polygon-deep-learning/data",
"/workspace/data", # Try inside Docker image
])
tfrecords_dir_list = [
# os.path.join(data_dir, "AerialImageDataset/tfrecords.mapalign.multires"),
os.path.join(data_dir, "bradbury_buildings_roads_height_dataset/tfrecords.mapalign.multires"),
# os.path.join(data_dir, "mapping_challenge_dataset/tfrecords.mapalign.multires"),
]
print("tfrecords_dir_list:")
print(tfrecords_dir_list)
# downsampling_factors = [1, 2, 4, 8]
# resolution_file_repeats = [1, 4, 16, 64]
tfrecord_filename_format = "ds_fac_{:02d}.{{:06d}}.tfrecord"
downsampling_factors = [1]
resolution_file_repeats = [1]
dataset_filename_list = create_dataset_filename_list(tfrecords_dir_list, tfrecord_filename_format,
downsampling_factors,
dataset="train",
resolution_file_repeats=resolution_file_repeats)
print("Length of dataset_filename_list:")
print(len(dataset_filename_list))
patch_outer_res = 220
patch_inner_res = 100
padding = (patch_outer_res - patch_inner_res) // 2
disp_max_abs_value = 4
batch_size = 32
dynamic_range = [-1, 1]
keep_poly_prob = 0.1 # Default: 0.1
data_aug = True
train = True
# --- --- #
# Even when reading in multiple threads, share the filename
# queue.
image, gt_polygons, disp_polygons, gt_polygon_map, disp_field_map, disp_polygon_map = read_and_decode(
dataset_filename_list,
patch_inner_res,
patch_outer_res,
batch_size,
dynamic_range,
keep_poly_prob=keep_poly_prob,
data_aug=data_aug,
train=train,
seed=seed)
# The op for initializing the variables.
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
# Let's read off 3 batches just for example
for i in range(30000):
print("---- {} ---".format(i))
if train:
image_batch, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch = sess.run(
[image, gt_polygon_map, disp_field_map, disp_polygon_map])
else:
image_batch, gt_polygons_batch, disp_polygons_batch, gt_polygon_map_batch, disp_field_map_batch, disp_polygon_map_batch = sess.run(
[image, gt_polygons, disp_polygons, gt_polygon_map, disp_field_map, disp_polygon_map])
print(gt_polygons_batch[0, 0, 0, :])
print(disp_polygons_batch[0, 0, 0, :])
print(gt_polygons_batch.shape)
print(disp_polygons_batch.shape)
print(image_batch.shape)
print(gt_polygon_map_batch.shape)
print(disp_field_map_batch.shape)
print(disp_polygon_map_batch.shape)
# np.set_printoptions(threshold=np.nan)
# print(image_batch)
# print(gt_polygon_map_batch)
# print(disp_field_map_batch)
# print(disp_polygon_map_batch)
print("image_batch:")
print(image_batch.min())
print(image_batch.max())
print("gt_polygon_map_batch:")
print(gt_polygon_map_batch.min())
print(gt_polygon_map_batch.max())
try:
print(disp_field_map_batch[:, :, :, 0].min())
print(disp_field_map_batch[:, :, :, 0].max())
except IndexError:
print("Skip min and max of disp_field_map_batch because of wrong rank")
# visualization.plot_field_map("disp_field_map", disp_field_map_batch[0])
print("disp_polygon_map_batch:")
print(disp_polygon_map_batch.min())
print(disp_polygon_map_batch.max())
dynamic_range = [-1, 1]
image_batch = (image_batch - dynamic_range[0]) / (
dynamic_range[1] - dynamic_range[0])
disp_field_map_batch = disp_field_map_batch * 2 # Within [-1, 1]
disp_field_map_batch = disp_field_map_batch * disp_max_abs_value # Within [-disp_max_abs_value, disp_max_abs_value]
# gt_polygon_map_batch *= 0 # TODO: Remove
# for batch_index in range(batch_size):
# if train:
# visualization.init_figures(["example"])
# # visualization.plot_example("example",
# # image_batch[batch_index],
# # gt_polygon_map_batch[batch_index],
# # disp_field_map_batch[batch_index],
# # disp_polygon_map_batch[batch_index])
# visualization.plot_example("example",
# image_batch[batch_index],
# disp_polygon_map_batch[batch_index])
# else:
# visualization.init_figures(["example", "example polygons"])
# visualization.plot_example("example",
# image_batch[batch_index],
# gt_polygon_map_batch[batch_index],
# disp_field_map_batch[batch_index],
# disp_polygon_map_batch[batch_index])
# visualization.plot_example_polygons("example polygons",
# image_batch[batch_index],
# gt_polygons_batch[batch_index],
# disp_polygons_batch[batch_index])
# input("Press <Enter> to continue...")
skimage.io.imsave("misaligned_polygon_raster.png", disp_polygon_map_batch[0])
skimage.io.imsave("image.png", image_batch[0])
disp_field_map_image = visualization.flow_to_image(disp_field_map_batch[0])
skimage.io.imsave("displacement_field_map.png", disp_field_map_image)
segmentation = gt_polygon_map_batch[0][padding:-padding, padding:-padding, :]
skimage.io.imsave("segmentation.png", segmentation)
# input("Press <Enter> to continue...")
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
main()
| 27,745 | 47.422339 | 197 | py |
mapalignment | mapalignment-master/projects/mapalign/utils/visualization.py | import os
import sys
import numpy as np
import cv2
current_filepath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(current_filepath, "../../utils"))
import python_utils
import polygon_utils
# --- Useful when code is executed inside Docker without a display: --- #
# Try importing pyplot:
display_is_available = python_utils.get_display_availability()
use_pyplot = None
if display_is_available:
if python_utils.module_exists("matplotlib.pyplot"):
# This means everything works with the default matplotlib backend
import matplotlib.pyplot as plt
use_pyplot = True
else:
        # matplotlib.pyplot is just not available, so we cannot plot anything
use_pyplot = False
else:
# Try switching backend
import matplotlib
matplotlib.use('Agg')
if python_utils.module_exists("matplotlib.pyplot"):
# The Agg backend works, pyplot is available, we just can't display plots to the screen (they'll be saved to file anyway)
import matplotlib.pyplot as plt
use_pyplot = True
# --- --- #
import skimage.io
print("#--- Visualization ---#")
print("display_is_available: {}".format(display_is_available))
print("use_pyplot: {}".format(use_pyplot))
def flow_to_image(flow):
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv = np.zeros((flow.shape[0], flow.shape[1], 3))
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 1] = 255
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
hsv = hsv.astype(np.uint8)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return rgb
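# Usage sketch (assumption: `flow` is a float (H, W, 2) array such as a displacement field from the
# dataset pipeline):
# rgb = flow_to_image(flow)  # uint8 (H, W, 3); hue encodes direction, brightness encodes magnitude
# skimage.io.imsave("displacement_field_map.png", rgb)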
if use_pyplot:
FIGURE_DICT = {}
def fig_out(figure_name, shape, nonblocking):
plt.margins(0)
plt.axis('off')
axes = plt.gca()
axes.set_xlim([0, shape[1]])
axes.set_ylim([0, shape[0]])
if display_is_available:
if nonblocking:
plt.draw()
plt.pause(0.001)
else:
plt.show()
# plt.savefig("{}.png".format(figure_name), bbox_inches='tight', pad_inches=0)
plt.savefig("{}.png".format(figure_name), pad_inches=0)
def init_figures(figure_names, nonblocking=True, figsize=(4, 4)):
for i, figure_name in enumerate(figure_names):
fig = plt.figure(i, figsize=figsize)
fig.canvas.set_window_title(figure_name)
FIGURE_DICT[figure_name] = i
if nonblocking:
plt.ion()
def plot_image(image):
plt.imshow(image[:, :, :3]) # Remove extra channels if any
def plot_example(figure_name, image, gt_polygon_map, disp_field_map=None, disp_polygon_map=None, nonblocking=True):
patch_outer_res = image.shape[0]
gt_polygon_map = gt_polygon_map.astype(np.float32)
if nonblocking:
fig = plt.figure(FIGURE_DICT[figure_name])
plt.cla()
plot_image(image)
# Overlay GT polygons with 0.5 alpha
shown_gt_polygon_map = np.zeros((patch_outer_res, patch_outer_res, 4))
shown_gt_polygon_map[:, :, :3] = gt_polygon_map
shown_gt_polygon_map[:, :, 3] = np.any(gt_polygon_map, axis=-1) / 2
plt.imshow(shown_gt_polygon_map)
if disp_polygon_map is not None:
disp_polygon_map = disp_polygon_map.astype(np.float32)
disp_polygon_map /= 2
# Overlay displaced polygons with 0.5 alpha
shown_disp_polygon_map = np.zeros((patch_outer_res, patch_outer_res, 4))
shown_disp_polygon_map[:, :, :3] = disp_polygon_map
shown_disp_polygon_map[:, :, 3] = np.any(disp_polygon_map, axis=-1) / 2
plt.imshow(shown_disp_polygon_map)
# Overlay displacement map with 0.5 alpha
if disp_field_map is not None:
patch_inner_res = disp_field_map.shape[0]
patch_padding = (patch_outer_res - patch_inner_res) // 2
shown_disp_field_map_padded = np.zeros((patch_outer_res, patch_outer_res, 4))
shown_disp_field_map = np.empty_like(disp_field_map)
maxi = np.max(np.abs(disp_field_map))
shown_disp_field_map[:, :, 0] = disp_field_map[:, :, 0] / (maxi + 1e-6)
shown_disp_field_map[:, :, 1] = disp_field_map[:, :, 1] / (maxi + 1e-6)
shown_disp_field_map = (shown_disp_field_map + 1) / 2
shown_disp_field_map_padded[patch_padding:-patch_padding, patch_padding:-patch_padding, 1:3] = shown_disp_field_map
shown_disp_field_map_padded[patch_padding:-patch_padding, patch_padding:-patch_padding, 3] = 0.5
plt.imshow(shown_disp_field_map_padded)
# Draw quivers on displaced corners
if disp_polygon_map is not None:
disp_polygon_map_cropped_corners = disp_polygon_map[patch_padding:-patch_padding, patch_padding:-patch_padding, 2]
                # Select pixels whose corner value is within 1e-1 of the channel maximum (only if that maximum is positive)
                corner_threshold = disp_polygon_map_cropped_corners.max() - 1e-1
                quiver_indexes = np.where((0 < corner_threshold) & (corner_threshold < disp_polygon_map_cropped_corners))
if len(quiver_indexes[0]) and len(quiver_indexes[1]):
disp_field_map_corners = disp_field_map[quiver_indexes[0], quiver_indexes[1], :]
plt.quiver(quiver_indexes[1] + patch_padding, quiver_indexes[0] + patch_padding, disp_field_map_corners[:, 1],
disp_field_map_corners[:, 0], scale=1, scale_units="xy", angles="xy", width=0.005, color="purple")
fig_out(figure_name, image.shape, nonblocking)
def plot_example_homography(figure_name, image, aligned_polygon_raster, misaligned_polygon_raster, nonblocking=True):
patch_res = image.shape[0]
aligned_polygon_raster = aligned_polygon_raster.astype(np.float32)
misaligned_polygon_raster = misaligned_polygon_raster.astype(np.float32)
# Darken image and gt_polygon_map
if nonblocking:
fig = plt.figure(FIGURE_DICT[figure_name])
plt.cla()
plot_image(image)
# Overlay aligned_polygon_raster with 0.5 alpha
shown_aligned_polygon_raster = np.zeros((patch_res, patch_res, 4))
shown_aligned_polygon_raster[:, :, 1] = aligned_polygon_raster[:, :, 0]
shown_aligned_polygon_raster[:, :, 3] = aligned_polygon_raster[:, :, 0] / 8
plt.imshow(shown_aligned_polygon_raster)
# Overlay misaligned_polygon_raster with 0.5 alpha
shown_misaligned_polygon_raster = np.zeros((patch_res, patch_res, 4))
shown_misaligned_polygon_raster[:, :, 0] = misaligned_polygon_raster[:, :, 0]
shown_misaligned_polygon_raster[:, :, 3] = misaligned_polygon_raster[:, :, 0] / 8
plt.imshow(shown_misaligned_polygon_raster)
fig_out(figure_name, image.shape, nonblocking)
def plot_polygons(polygons, color):
# print("plot_polygons(polygons, color)") # TODO: remove
for i, polygon in enumerate(polygons):
# Remove coordinates after nans
indexes_of_nans = np.where(np.isnan(polygon[:, 0]))[0]
if len(indexes_of_nans):
                polygon_nans_crop = polygon[:indexes_of_nans[0], :]  # Keep only the coordinates before the first NaN (padding)
polygon_utils.plot_polygon(polygon_nans_crop, color=color, draw_labels=False, indexing="ij")
else:
polygon_utils.plot_polygon(polygon, color=color, draw_labels=False, indexing="ij")
# if 10 < i: # TODO: remove
# break # TODO: remove
def plot_example_polygons(figure_name, image, gt_polygons, disp_polygons=None, aligned_disp_polygons=None, nonblocking=True):
if nonblocking:
fig = plt.figure(FIGURE_DICT[figure_name])
plt.cla()
plot_image(image)
# Draw gt polygons
plot_polygons(gt_polygons, "green")
if disp_polygons is not None:
plot_polygons(disp_polygons, "red")
if aligned_disp_polygons is not None:
plot_polygons(aligned_disp_polygons, "blue")
fig_out(figure_name, image.shape, nonblocking)
def plot_seg(figure_name, image, seg, nonblocking=True):
patch_outer_res = image.shape[0]
patch_inner_res = seg.shape[0]
patch_padding = (patch_outer_res - patch_inner_res) // 2
if 3 < seg.shape[2]:
seg = seg[:, :, 1:4]
# seg = seg.astype(np.float32)
# print(seg.dtype)
# print(seg.shape)
# print(seg.min())
# print(seg.max())
if nonblocking:
fig = plt.figure(FIGURE_DICT[figure_name])
plt.cla()
plot_image(image)
# Overlay GT polygons
shown_seg = np.zeros((patch_outer_res, patch_outer_res, 4))
if 0 < patch_padding:
shown_seg[patch_padding:-patch_padding, patch_padding:-patch_padding, :3] = seg[:, :, :]
shown_seg[patch_padding:-patch_padding, patch_padding:-patch_padding, 3] = np.clip(np.sum(seg[:, :, :], axis=-1), 0, 1)
else:
shown_seg[:, :, :3] = seg[:, :, :]
shown_seg[:, :, 3] = np.clip(
np.sum(seg[:, :, :], axis=-1), 0, 1)
plt.imshow(shown_seg)
fig_out(figure_name, image.shape, nonblocking)
def plot_field_map(figure_name, field_map, nonblocking=True):
assert len(field_map.shape) == 3 and field_map.shape[2] == 2, "field_map should have 3 dimensions like so: [height, width, 2]"
from mpl_toolkits.mplot3d import Axes3D
row = np.linspace(0, 1, field_map.shape[0])
col = np.linspace(0, 1, field_map.shape[1])
rr, cc = np.meshgrid(row, col, indexing='ij')
fig = plt.figure(figsize=(18, 9))
ax = fig.add_subplot(121, projection='3d')
ax.plot_surface(rr, cc, field_map[:, :, 0], rstride=3, cstride=3, linewidth=1, antialiased=True)
ax = fig.add_subplot(122, projection='3d')
ax.plot_surface(rr, cc, field_map[:, :, 1], rstride=3, cstride=3, linewidth=1, antialiased=True)
plt.savefig("{}.png".format(figure_name), pad_inches=0)
else:
def init_figures(figure_names, nonblocking=True):
print("Graphical interface (matplotlib.pyplot) is not available. Will print out relevant values instead of "
"plotting.")
def plot_example(figure_name, image, gt_polygon_map, disp_field_map, disp_polygon_map, nonblocking=True):
print(figure_name)
def plot_example_homography(figure_name, image, aligned_polygon_raster, misaligned_polygon_raster,
nonblocking=True):
print(figure_name)
def plot_example_polygons(figure_name, image, gt_polygons, disp_polygons, aligned_disp_polygons=None, nonblocking=True):
print(figure_name)
# print("gt_polygons:")
# print(gt_polygons)
# print("aligned_disp_polygons:")
# print(aligned_disp_polygons)
def plot_seg(figure_name, image, seg, nonblocking=True):
print(figure_name)
def plot_batch(figure_names, image_batch, gt_polygon_map_batch, disp_field_map_batches, disp_polygon_map_batch, nonblocking=True):
assert len(figure_names) == len(disp_field_map_batches)
# batch_size = gt_polygon_map_batch.shape[0]
# index = random.randrange(batch_size)
index = 0
for figure_name, disp_field_map_batch in zip(figure_names, disp_field_map_batches):
plot_example(figure_name, image_batch[index], gt_polygon_map_batch[index], disp_field_map_batch[index], disp_polygon_map_batch[index], nonblocking=nonblocking)
def plot_batch_polygons(figure_name, image_batch, gt_polygons_batch, disp_polygons_batch, aligned_disp_polygons_batch, nonblocking=True):
# batch_size = image_batch.shape[0]
# index = random.randrange(batch_size)
index = 0
plot_example_polygons(figure_name, image_batch[index], gt_polygons_batch[index], disp_polygons_batch[index], aligned_disp_polygons_batch[index], nonblocking=nonblocking)
def plot_batch_seg(figure_name, image_batch, seg_batch):
# batch_size = image_batch.shape[0]
# index = random.randrange(batch_size)
index = 0
plot_seg(figure_name, image_batch[index], seg_batch[index])
def save_plot_image_polygons(filepath, ori_image, ori_gt_polygons, disp_polygons, aligned_disp_polygons, line_width=1):
spatial_shape = ori_image.shape[:2]
ori_gt_polygons_map = polygon_utils.draw_polygon_map(ori_gt_polygons, spatial_shape, fill=False, edges=True,
vertices=False, line_width=line_width)
disp_polygons_map = polygon_utils.draw_polygon_map(disp_polygons, spatial_shape, fill=False, edges=True,
vertices=False, line_width=line_width)
aligned_disp_polygons_map = polygon_utils.draw_polygon_map(aligned_disp_polygons, spatial_shape, fill=False,
edges=True, vertices=False, line_width=line_width)
output_image = ori_image[:, :, :3] # Keep first 3 channels
output_image = output_image.astype(np.float64)
output_image[np.where(0 < ori_gt_polygons_map[:, :, 0])] = np.array([0, 255, 0])
output_image[np.where(0 < disp_polygons_map[:, :, 0])] = np.array([255, 0, 0])
output_image[np.where(0 < aligned_disp_polygons_map[:, :, 0])] = np.array([0, 0, 255])
# output_image = np.clip(output_image, 0, 255)
output_image = output_image.astype(np.uint8)
skimage.io.imsave(filepath, output_image)
def save_plot_segmentation_image(filepath, segmentation_image):
output_image = np.zeros((segmentation_image.shape[0], segmentation_image.shape[1], 4))
output_image[:, :, :3] = segmentation_image[:, :, 1:4] # Remove background channel
output_image[:, :, 3] = np.sum(segmentation_image[:, :, 1:4], axis=-1) # Add alpha
output_image = output_image * 255
output_image = np.clip(output_image, 0, 255)
output_image = output_image.astype(np.uint8)
skimage.io.imsave(filepath, output_image)
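# Minimal usage sketch of the two save helpers above (filenames and shapes are illustrative
# assumptions, not values taken from the training/testing scripts):
#   seg = np.random.rand(220, 220, 4)  # channels: (background, fill, edge, vertex), values in [0, 1]
#   save_plot_segmentation_image("tile.segmentation.png", seg)
# save_plot_image_polygons() works the same way but takes the original image plus three polygon
# lists (ground truth, displaced, aligned) and overlays them in green, red and blue respectively.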
| 13,891 | 40.717718 | 173 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/main.py | #####
#
# Quickly align the OSM data of your images with this script
#
####
import sys
import os
import argparse
import skimage.io
import numpy as np
import test
sys.path.append("../../utils")
import run_utils
import print_utils
import geo_utils
# -- Default script arguments: --- #
CONFIG = "config"
IMAGE = "geo_images/test_image.tif"
SHAPEFILE = None
BATCH_SIZE = 12
RUNS_DIRPATH = "runs.igarss2019" # Best models: runs.igarss2019
# Should be in descending order:
DS_FAC_LIST = [
8,
4,
2,
1,
]
# --- Params: --- #
RUN_NAME_FORMAT = "ds_fac_{}_inria_bradbury_all_2" # Best models: ds_fac_{}_inria_bradbury_all_2
# --- --- #
def get_args():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-c', '--config',
default=CONFIG,
type=str,
help='Name of the config file, excluding the .json file extension.')
argparser.add_argument(
'-i', '--image',
default=IMAGE,
type=str,
help='Filepath to the GeoTIFF image.')
argparser.add_argument(
'-s', '--shapefile',
default=SHAPEFILE,
type=str,
help='Filepath to the shapefile.')
argparser.add_argument(
'-b', '--batch_size',
default=BATCH_SIZE,
type=int,
help='Batch size. Generally set as large as the VRAM can handle. Default value can be set in config file.')
argparser.add_argument(
'-r', '--runs_dirpath',
default=RUNS_DIRPATH,
type=str,
help='Name of directory where the models can be found.')
argparser.add_argument(
'-d', '--ds_fac',
default=DS_FAC_LIST,
type=int,
nargs='+',
help='Downscaling factors. Should be a list of descending integers. Used to retrieve run names')
argparser.add_argument(
'--pixelsize',
type=float,
help='Set pixel size (in meters) of the image. Useful when the image does not have this value in its metadata.')
args = argparser.parse_args()
return args
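# Example invocations (paths are illustrative; any flag left out falls back to the defaults above):
#   python main.py --image geo_images/test_image.tif --batch_size 12
#   python main.py -i my_area.tif -s my_area_buildings.shp -d 8 4 2 1 --pixelsize 0.3
# When --shapefile is omitted, building footprints are fetched from OpenStreetMap instead
# (see get_osm_annotations below).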
def read_image(filepath, pixelsize=None):
image_array = skimage.io.imread(filepath)
if pixelsize is None:
pixelsize = geo_utils.get_pixelsize(filepath)
assert type(pixelsize) == float, "pixelsize should be float, not {}".format(type(pixelsize))
if pixelsize < 1e-3:
print_utils.print_warning("WARNING: pixel size of image is detected to be {}m which seems very small to be correct. "
"If problems occur specify pixelsize with the pixelsize command-line argument".format(pixelsize))
image_metadata = {
"filepath": filepath,
"pixelsize": pixelsize,
}
return image_array, image_metadata
def normalize(image, mu=None, sigma=None):
if mu is None:
mu = np.mean(image)
if sigma is None:
sigma = np.std(image)
return (image - mu) / sigma
def get_osm_annotations(filepath):
filename_no_extension = os.path.splitext(filepath)[0]
npy_filepath = filename_no_extension + ".npy"
if os.path.exists(npy_filepath):
print_utils.print_info("Loading OSM building data from disc...")
gt_polygons = np.load(npy_filepath, allow_pickle=True)
else:
print_utils.print_info("Fetching OSM building data from the internet...")
gt_polygons = geo_utils.get_polygons_from_osm(filepath, tag="building")
# Save npy to avoid re-fetching:
np.save(npy_filepath, gt_polygons)
# Save shapefile for visualisation:
shp_filepath = filename_no_extension + ".shp"
geo_utils.save_shapefile_from_polygons(gt_polygons, filepath, shp_filepath)
return gt_polygons
def get_shapefile_annotations(image_filepath, shapefile_filepath):
polygons, _ = geo_utils.get_polygons_from_shapefile(image_filepath, shapefile_filepath)
return polygons
def save_annotations(image_filepath, polygons):
filename_no_extension = os.path.splitext(image_filepath)[0]
npy_filepath = filename_no_extension + ".aligned.npy"
shp_filepath = filename_no_extension + ".aligned.shp"
np.save(npy_filepath, polygons)
geo_utils.save_shapefile_from_polygons(polygons, image_filepath, shp_filepath)
def get_abs_path(filepath):
working_dir = os.path.dirname(os.path.abspath(__file__))
if os.path.isabs(filepath):
abs_path = filepath
else:
abs_path = os.path.join(working_dir, filepath)
return abs_path
def print_hist(hist):
print("hist:")
for (bin, count) in zip(hist[1], hist[0]):
print("{}: {}".format(bin, count))
def clip_image(image, min, max):
image = np.maximum(np.minimum(image, max), min)
return image
def get_min_max(image, std_factor=2):
mu = np.mean(image, axis=(0, 1))
std = np.std(image, axis=(0, 1))
min = mu - std_factor * std
max = mu + std_factor * std
return min, max
def stretch_image(image, min, max, target_min, target_max):
image = (image - min) / (max - min)
image = image * (target_max - target_min) + target_min
return image
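# Worked example of the contrast stretch used in main() (numbers are illustrative): with
# mu = 100, std = 20 and std_factor = 3, get_min_max() returns (40, 160). stretch_image() then
# maps 40 -> 0, 100 -> ~127.5 and 160 -> 255; values outside [40, 160] overshoot [0, 255] and are
# brought back into range by the following clip_image(image, 0, 255) call.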
def check_polygons_in_image(image, polygons):
"""
    Allows some vertices to be outside the image. Returns True if the polygons'
    joint bounding box overlaps the image at all.
    :param image: image array of shape (height, width, ...)
    :param polygons: list of polygons, each an array of (row, col) vertices
    :return: True if the polygons overlap the image, False otherwise
"""
height = image.shape[0]
width = image.shape[1]
min_i = min([polygon[:, 0].min() for polygon in polygons])
min_j = min([polygon[:, 1].min() for polygon in polygons])
max_i = max([polygon[:, 0].max() for polygon in polygons])
max_j = max([polygon[:, 1].max() for polygon in polygons])
return not (max_i < 0 or height < min_i or max_j < 0 or width < min_j)
def main():
# --- Process args --- #
args = get_args()
config = run_utils.load_config(args.config)
if config is None:
print_utils.print_error(
"ERROR: cannot continue without a config file. Exiting now...")
exit()
print_utils.print_info("Using downscaling factors: {}".format(args.ds_fac))
run_name_list = [RUN_NAME_FORMAT.format(ds_fac) for ds_fac in args.ds_fac]
# --- Read image --- #
print_utils.print_info("Reading image...")
image_filepath = get_abs_path(args.image)
image, image_metadata = read_image(image_filepath, args.pixelsize)
image = clip_image(image, 0, 255)
# hist = np.histogram(image)
# print_hist(hist)
im_min, im_max = get_min_max(image, std_factor=3)
# print("min: {}, max: {}".format(im_min, im_max))
image = stretch_image(image, im_min, im_max, 0, 255)
image = clip_image(image, 0, 255)
# hist = np.histogram(image)
# print_hist(hist)
print("Image stats:")
print("\tShape: {}".format(image.shape))
print("\tMin: {}".format(image.min()))
print("\tMax: {}".format(image.max()))
# --- Read shapefile if it exists --- #
if args.shapefile is not None:
shapefile_filepath = get_abs_path(args.shapefile)
gt_polygons = get_shapefile_annotations(image_filepath, shapefile_filepath)
else:
# --- Load or fetch OSM building data --- #
gt_polygons = get_osm_annotations(image_filepath)
# --- Print polygon info --- #
print("Polygons stats:")
print("\tCount: {}".format(len(gt_polygons)))
print("\tMin: {}".format(min([polygon.min() for polygon in gt_polygons])))
print("\tMax: {}".format(max([polygon.max() for polygon in gt_polygons])))
if not check_polygons_in_image(image, gt_polygons):
print_utils.print_error("ERROR: polygons are not inside the image. This is most likely due to using the wrong projection when reading the input shapefile. Aborting...")
exit()
print_utils.print_info("Aligning building annotations...")
aligned_polygons = test.test_align_gt(args.runs_dirpath, image, image_metadata, gt_polygons, args.batch_size,
args.ds_fac, run_name_list, config["disp_max_abs_value"],
output_shapefiles=False)
print_utils.print_info("Saving aligned building annotations...")
save_annotations(args.image, aligned_polygons)
if __name__ == '__main__':
main()
| 8,129 | 31.390438 | 176 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/3_test_plot.3_seg.py | import sys
import os
import matplotlib.pyplot as plt
import numpy as np
sys.path.append("../../utils")
import python_utils
# --- Params --- #
IOUS_FILENAME_EXTENSION = ".iou.npy"
SOURCE_PARAMS_LIST = [
{
"name": "Full method",
"path": "test/bradbury_buildings.seg.ds_fac_1",
"plot_color": "blue"
},
{
"name": "No disp loss",
"path": "test/bradbury_buildings.seg.ds_fac_1_no_disp_loss",
"plot_color": "orange"
},
# {
# "name": "Full method ds_fac=1 input_poly_coef 0.1",
# "path": "test/bradbury_buildings.seg.ds_fac_1",
# "plot_color": "darkorchid"
# },
# {
# "name": "No intermediary losses",
# "path": "test/bradbury_buildings.seg.ds_fac_1_no_interm_loss",
# "plot_color": "red"
# },
# {
# "name": "No dropping of input polygons",
# "path": "test/bradbury_buildings.seg.ds_fac_1_keep_poly_1",
# "plot_color": "darksalmon"
# },
# {
# "name": "Full method ds_fac=4",
# "path": "test/bradbury_buildings.seg.ds_fac_4",
# "plot_color": "blue"
# },
# # {
# # "name": "Full method ds_fac=4 input_poly_coef_1",
# # "path": "test/bradbury_buildings.seg.ds_fac_4_input_poly_coef_1",
# # "plot_color": "darkorchid"
# # },
# {
# "name": "Full method ds_fac=4 keep_poly_1",
# "path": "test/bradbury_buildings.seg.ds_fac_4_keep_poly_1",
# "plot_color": "darksalmon"
# },
# {
# "name": "Full method ds_fac=4 no_interm_loss",
# "path": "test/bradbury_buildings.seg.ds_fac_4_no_interm_loss",
# "plot_color": "red"
# },
]
ALPHA_MAIN = 1.0
ALPHA_INDIVIDUAL = 0.2
COLOR = 'cornflowerblue'
FILEPATH = "test/ious_compare.png"
# --- --- #
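# Each *.iou.npy file is expected to hold a dict saved by measure_ious() in test.py:
#   {"thresholds": <1-D array of mask thresholds>, "ious": <1-D array of IoU values of the same length>}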
def main():
plt.figure(1, figsize=(4, 4))
for source_params in SOURCE_PARAMS_LIST:
thresholds_ious_filepath_list = python_utils.get_filepaths(source_params["path"], IOUS_FILENAME_EXTENSION)
# print(thresholds_ious_filepath_list)
thresholds_ious_list = []
for thresholds_ious_filepath in thresholds_ious_filepath_list:
            thresholds_ious = np.load(thresholds_ious_filepath, allow_pickle=True).item()
thresholds_ious_list.append(thresholds_ious)
# print(thresholds_ious_list)
        # Plot the average curve over all test images
ious_list = []
for thresholds_ious in thresholds_ious_list:
ious_list.append(thresholds_ious["ious"])
ious_table = np.stack(ious_list, axis=0)
ious_average = np.mean(ious_table, axis=0)
ious_average_area = np.trapz(ious_average, thresholds_ious_list[0]["thresholds"])
ious_average_max = np.max(ious_average)
ious_average_midpoint = ious_average[ious_average.shape[0]//2]
print("ious_average_area = {}".format(ious_average_area))
print("ious_average_max = {}".format(ious_average_max))
print("ious_average_midpoint = {}".format(ious_average_midpoint))
plt.plot(thresholds_ious_list[0]["thresholds"], ious_average, color=source_params["plot_color"], alpha=ALPHA_MAIN, label=source_params["name"])
# Plot all curves:
for thresholds_ious in thresholds_ious_list:
plt.plot(thresholds_ious["thresholds"], thresholds_ious["ious"],
color=source_params["plot_color"], alpha=ALPHA_INDIVIDUAL, label=source_params["name"])
plt.grid(True)
axes = plt.gca()
axes.set_xlim([0.0, 1.0])
axes.set_ylim([-0.01, 1.0])
# plt.title("IoU relative to the mask threshold")
plt.xlabel('Mask threshold')
plt.ylabel('IoU')
# Add legends in top-left
handles = [plt.Line2D([0], [0], color=source_params["plot_color"]) for source_params in SOURCE_PARAMS_LIST]
labels = [source_params["name"] for source_params in SOURCE_PARAMS_LIST]
plt.legend(handles, labels)
# Plot
plt.tight_layout()
plt.savefig(FILEPATH)
plt.show()
if __name__ == '__main__':
main()
| 4,025 | 31.731707 | 151 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/5_model_buildings.py | import os.path
import numpy as np
import bpy, bmesh
import config
# --- Params --- #
OUTPUT_BASE_DIRPATH = os.path.join(config.PROJECT_DIR, "3d_buildings/leibnitz")
SCALE = 0.1
IMAGE_HEIGHT = 12360 * 0.5 # In meters
IMAGE_WIDTH = 17184 * 0.5 # In meters
UV_SCALE = (1 / (IMAGE_HEIGHT * SCALE), 1 / (IMAGE_WIDTH * SCALE)) # (u, v)
# --- --- #
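# Expected inputs (inferred from the np.load calls below, exact conventions may differ):
# polygons.npy holds a list of (N, 2) vertex arrays ordered (row, col) and heights.npy holds one
# height per building, presumably in the same units as IMAGE_HEIGHT/IMAGE_WIDTH (meters here).
# Each vertex is mapped to Blender space as (x, y, z) = (col, -row, 0) before extrusion by its height.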
def build_buildings(polygon_list, heights):
bm = bmesh.new()
uv_layer = bm.loops.layers.uv.new()
for index, (polygon, height) in enumerate(zip(polygon_list, heights)):
if index % 1000 == 0:
print("Progress: {}/{}".format(index + 1, len(polygon_list)))
verts = []
for p in polygon:
vert = bm.verts.new((p[1], - p[0], 0))
verts.append(vert)
face = bm.faces.new(verts)
for p, loop in zip(polygon, face.loops):
loop[uv_layer].uv = (p[1] * UV_SCALE[0], 1 - p[0] * UV_SCALE[1])
# Extrude by height
r = bmesh.ops.extrude_discrete_faces(bm, faces=[face])
bmesh.ops.translate(bm, vec=(0, 0, height), verts=r['faces'][0].verts)
bm.normal_update()
me = bpy.data.meshes.new("polygon")
bm.to_mesh(me)
ob = bpy.data.objects.new("building", me)
bpy.context.scene.objects.link(ob)
bpy.context.scene.update()
# Load building footprints
polygon_list = np.load(os.path.join(OUTPUT_BASE_DIRPATH, "polygons.npy"))
scaled_polygon_list = [SCALE * polygon for polygon in polygon_list]
heights = np.load(os.path.join(OUTPUT_BASE_DIRPATH, "heights.npy"))
scaled_heights = SCALE * heights
# Build each building one at a time
print("# --- Starting to build buildings: --- #")
build_buildings(scaled_polygon_list, scaled_heights)
print("# --- Finished building buildings --- #")
| 1,792 | 27.460317 | 79 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/test.py | import sys
import os
import numpy as np
# Import visualization first as it might change the matplotlib backend
sys.path.append("../utils")
import visualization
import multires_pipeline
sys.path.append("../evaluate_funcs")
import evaluate_utils
sys.path.append("../../utils")
import polygon_utils
import print_utils
def generate_disp_data(normed_disp_field_maps, gt_polygons, disp_max_abs_value):
scaled_disp_field_maps = normed_disp_field_maps * disp_max_abs_value
disp_polygons_list = polygon_utils.apply_displacement_fields_to_polygons(gt_polygons,
scaled_disp_field_maps)
return disp_polygons_list
def measure_accuracies(polygons_1, polygons_2, thresholds, filepath):
accuracies = evaluate_utils.compute_threshold_accuracies([polygons_1], [polygons_2], thresholds)
threshold_accuracies = {
"thresholds": thresholds,
"accuracies": accuracies,
}
np.save(filepath, threshold_accuracies)
return accuracies
def measure_ious(gt_polygons, pred_seg, thresholds, filepath):
padding = (220 - 100) // 2 # TODO: retrieve this programmatically
gt_seg = polygon_utils.draw_polygon_map(gt_polygons, pred_seg.shape[:2], fill=True, edges=True, vertices=True)
# Crop both images to remove margin
pred_seg = pred_seg[padding:-padding, padding:-padding, :]
gt_seg = gt_seg[padding:-padding, padding:-padding, :]
# Reduce channels to single max value
pred_seg = np.max(pred_seg, axis=-1)
gt_seg = np.max(gt_seg, axis=-1)
gt_mask = gt_seg.astype(np.bool)
# Create thresholded masks
ious = []
for threshold in thresholds:
pred_mask = threshold < pred_seg
# import skimage.io
# skimage.io.imsave("pred_mask_{:0.02}.png".format(threshold), pred_mask * 255)
intersection = pred_mask & gt_mask
union = pred_mask | gt_mask
intersection_count = np.sum(intersection)
union_count = np.sum(union)
if 0 < union_count:
iou = intersection_count / float(union_count)
else:
iou = np.nan
ious.append(iou)
thresholds_ious = {
"thresholds": thresholds,
"ious": ious,
}
np.save(filepath, thresholds_ious)
return ious
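# Tiny worked example of the IoU computed above (values are illustrative): if, at a given threshold,
# the predicted mask covers 3 pixels, the ground-truth mask covers 4 pixels and they share 2 pixels,
# then intersection = 2, union = 3 + 4 - 2 = 5 and IoU = 2 / 5 = 0.4.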
def test(runs_dirpath, ori_image, ori_metadata, ori_gt_polygons, ori_disp_polygons, batch_size, ds_fac_list, run_name_list,
model_disp_max_abs_value, thresholds, test_output_dir, output_name, output_shapefiles=True,
properties_list=None):
if output_shapefiles:
assert properties_list is not None and len(ori_disp_polygons) == len(
properties_list), "ori_disp_polygons and properties_list should have the same length"
polygons_image_plot_filename_format = "{}.polygons.{}.png"
shapefile_filename_format = "{}.{}_polygons.shp"
segmentation_image_plot_filename_format = "{}.segmentation.png"
accuracies_filename_format = "{}.accuracy.npy"
# --- Run the model --- #
print("# --- Run the model --- #")
aligned_disp_polygons, segmentation_image = multires_pipeline.multires_inference(runs_dirpath, ori_image, ori_metadata,
ori_disp_polygons,
model_disp_max_abs_value,
batch_size, ds_fac_list,
run_name_list)
# aligned_disp_polygons = ori_disp_polygons
# segmentation_image = np.zeros((ori_image.shape[0], ori_image.shape[1], 4))
# --- Save segmentation_output --- #
print("# --- Save segmentation_output --- #")
plot_segmentation_image_filename = segmentation_image_plot_filename_format.format(output_name)
plot_segmentation_image_filepath = os.path.join(test_output_dir, plot_segmentation_image_filename)
visualization.save_plot_segmentation_image(plot_segmentation_image_filepath, segmentation_image)
# --- Save polygons plot --- #
save_polygon_names = ["all"] # Can be: ["all", "gt", "disp", "aligned"]
for save_polygon_name in save_polygon_names:
plot_image_filename = polygons_image_plot_filename_format.format(output_name, save_polygon_name)
plot_image_filepath = os.path.join(test_output_dir, plot_image_filename)
if save_polygon_name == "all":
visualization.save_plot_image_polygons(plot_image_filepath, ori_image, ori_gt_polygons, ori_disp_polygons,
aligned_disp_polygons)
elif save_polygon_name == "gt":
visualization.save_plot_image_polygons(plot_image_filepath, ori_image, ori_gt_polygons, [],
[], line_width=3)
elif save_polygon_name == "disp":
visualization.save_plot_image_polygons(plot_image_filepath, ori_image, [], ori_disp_polygons,
[], line_width=3)
elif save_polygon_name == "aligned":
visualization.save_plot_image_polygons(plot_image_filepath, ori_image, [], [],
aligned_disp_polygons, line_width=3)
# visualization.save_plot_image_polygons(plot_image_filepath, ori_image, [], ori_disp_polygons,
# aligned_disp_polygons)
# --- Save polygons as shapefiles --- #
if output_shapefiles:
import geo_utils
print("# --- Save polygons as shapefiles --- #")
output_shapefile_filename = shapefile_filename_format.format(output_name, "ori")
output_shapefile_filepath = os.path.join(test_output_dir, output_shapefile_filename)
geo_utils.save_shapefile_from_polygons(ori_gt_polygons, ori_metadata["filepath"],
output_shapefile_filepath, properties_list=properties_list)
output_shapefile_filename = shapefile_filename_format.format(output_name, "misaligned")
output_shapefile_filepath = os.path.join(test_output_dir, output_shapefile_filename)
geo_utils.save_shapefile_from_polygons(ori_disp_polygons, ori_metadata["filepath"],
output_shapefile_filepath, properties_list=properties_list)
output_shapefile_filename = shapefile_filename_format.format(output_name, "aligned")
output_shapefile_filepath = os.path.join(test_output_dir, output_shapefile_filename)
geo_utils.save_shapefile_from_polygons(aligned_disp_polygons, ori_metadata["filepath"],
output_shapefile_filepath, properties_list=properties_list)
# --- Measure accuracies --- #
if len(ori_gt_polygons) == len(aligned_disp_polygons):
print("# --- Measure accuracies --- #")
accuracies_filename = accuracies_filename_format.format(output_name)
accuracies_filepath = os.path.join(test_output_dir, accuracies_filename)
accuracies = measure_accuracies(ori_gt_polygons, aligned_disp_polygons, thresholds, accuracies_filepath)
print(accuracies)
return aligned_disp_polygons
def test_align_gt(runs_dirpath, ori_image, ori_metadata, ori_gt_polygons, batch_size, ds_fac_list, run_name_list,
model_disp_max_abs_value, output_dir=None, output_name=None, output_shapefiles=True, properties_list=None):
# --- Run the model --- #
print("# --- Run the model --- #")
aligned_gt_polygons, _ = multires_pipeline.multires_inference(runs_dirpath, ori_image,
ori_metadata,
ori_gt_polygons,
model_disp_max_abs_value,
batch_size, ds_fac_list,
run_name_list)
# --- Save polygons as shapefiles --- #
if output_shapefiles:
if output_dir is not None and output_name is not None:
import geo_utils
print("# --- Save polygons as shapefiles --- #")
shapefile_filename_format = "{}.aligned_gt_polygons.shp"
output_shapefile_filename = shapefile_filename_format.format(output_name, "ori")
output_shapefile_filepath = os.path.join(output_dir, output_shapefile_filename)
geo_utils.save_shapefile_from_polygons(aligned_gt_polygons, ori_metadata["filepath"],
output_shapefile_filepath, properties_list=properties_list)
else:
print_utils.print_warning("Could not save shapefile as output_dir and/or output_name was not specified.")
return aligned_gt_polygons
def test_image_with_gt_and_disp_polygons(runs_dirpath, image_name, ori_image, ori_metadata, ori_gt_polygons, ori_disp_polygons,
ori_disp_properties_list,
batch_size, ds_fac_list, run_name_list, model_disp_max_abs_value, thresholds,
test_output_dir, output_shapefiles=True):
ori_gt_polygons = polygon_utils.polygons_remove_holes(ori_gt_polygons) # TODO: Remove
ori_gt_polygons = polygon_utils.simplify_polygons(ori_gt_polygons, tolerance=1) # Remove redundant vertices
ori_disp_polygons = polygon_utils.polygons_remove_holes(ori_disp_polygons) # TODO: Remove
ori_disp_polygons = polygon_utils.simplify_polygons(ori_disp_polygons, tolerance=1) # Remove redundant vertices
output_name = image_name
test(runs_dirpath, ori_image, ori_metadata, ori_gt_polygons, ori_disp_polygons, batch_size, ds_fac_list, run_name_list,
model_disp_max_abs_value, thresholds, test_output_dir, output_name, output_shapefiles=output_shapefiles,
properties_list=ori_disp_properties_list)
def test_image_with_gt_polygons_and_disp_maps(runs_dirpath, image_name, ori_image, ori_metadata, ori_gt_polygons,
ori_normed_disp_field_maps, disp_max_abs_value, batch_size,
ds_fac_list, run_name_list, model_disp_max_abs_value, thresholds,
test_output_dir,
output_shapefiles=True):
output_name_format = "{}.disp_{:02d}"
ori_gt_polygons = polygon_utils.polygons_remove_holes(ori_gt_polygons) # TODO: Remove
# Remove redundant vertices
ori_gt_polygons = polygon_utils.simplify_polygons(ori_gt_polygons, tolerance=1)
disp_polygons_list = generate_disp_data(ori_normed_disp_field_maps, ori_gt_polygons,
disp_max_abs_value)
for i in range(len(disp_polygons_list)):
print("# --- Testing with disp {:02d} --- #".format(i))
disp_polygons = disp_polygons_list[i]
output_name = output_name_format.format(image_name, i)
test(runs_dirpath, ori_image, ori_metadata, ori_gt_polygons, disp_polygons, batch_size, ds_fac_list,
run_name_list,
model_disp_max_abs_value, thresholds, test_output_dir, output_name, output_shapefiles=output_shapefiles)
def test_detect_new_buildings(runs_dirpath, image_name, ori_image, ori_metadata, ori_gt_polygons, batch_size, ds_fac_list,
run_name_list, model_disp_max_abs_value, polygonization_params, thresholds,
test_output_dir, output_shapefiles=True):
ori_gt_polygons = polygon_utils.polygons_remove_holes(ori_gt_polygons) # TODO: Remove
ori_gt_polygons = polygon_utils.simplify_polygons(ori_gt_polygons, tolerance=1) # Remove redundant vertices
ori_disp_polygons = [] # Don't input any polygons
output_name = image_name
seg_image_plot_filename_format = "{}.segmentation.png"
ious_filename_format = "{}.iou.npy"
new_polygons_filename_format = "{}.new_polygons.npy"
aligned_new_polygons_filename_format = "{}.aligned_new_polygons.npy"
polygons_image_plot_filename_format = "{}.polygons.png"
shapefile_filename_format = "{}.{}_polygons.shp"
accuracies_filename_format = "{}.accuracy.npy"
# --- Get the segmentation output --- #
seg_ds_fac_list = ds_fac_list[-1:]
seg_run_name_list = run_name_list[-1:]
print("# --- Run the model --- #")
_, segmentation_image = multires_pipeline.multires_inference(runs_dirpath, ori_image, ori_metadata,
ori_disp_polygons,
model_disp_max_abs_value,
batch_size, seg_ds_fac_list,
seg_run_name_list)
# segmentation_image = np.zeros((ori_image.shape[0], ori_image.shape[1], 4))
print("# --- Save segmentation_output --- #")
plot_segmentation_image_filename = seg_image_plot_filename_format.format(output_name)
plot_segmentation_image_filepath = os.path.join(test_output_dir, plot_segmentation_image_filename)
visualization.save_plot_segmentation_image(plot_segmentation_image_filepath, segmentation_image)
seg_image = segmentation_image[:, :, 1:] # Remove background channel
# --- Measure IoUs --- #
print("# --- Measure accuracies --- #")
print(seg_image.min())
print(seg_image.max())
iou_thresholds = np.arange(0, 1.01, 0.01)
ious_filename = ious_filename_format.format(output_name)
ious_filepath = os.path.join(test_output_dir, ious_filename)
ious = measure_ious(ori_gt_polygons, seg_image, iou_thresholds, ious_filepath)
print("IoUs:")
print(ious)
# --- Polygonize segmentation --- #
print("# --- Polygonize segmentation --- #")
# TODO: remove:
# seg_image_filepath = "test/bradbury_buildings.1_double.only_seg/SanFrancisco_01.disp_00.segmentation.png"
# seg_image = image_utils.load_image(seg_image_filepath)
# seg_image = seg_image / 255
fill_threshold = polygonization_params["fill_threshold"]
outline_threshold = polygonization_params["outline_threshold"]
selem_width = polygonization_params["selem_width"]
iterations = polygonization_params["iterations"]
# new_polygons = polygonize_buildings.find_building_contours_from_seg(seg_image, fill_threshold,
# outline_threshold, selem_width, iterations)
# print("# --- Save new polygons--- #")
# new_polygons_filename = new_polygons_filename_format.format(output_name)
# new_polygons_filepath = os.path.join(test_output_dir, new_polygons_filename)
# np.save(new_polygons_filepath, new_polygons)
#
# # --- Align new polygons --- #
# print("# --- Align new polygons --- #")
# print("# --- Run the model --- #")
# aligned_new_polygons = new_polygons
# aligned_new_polygons, segmentation_image = multires_pipeline.multires_inference(runs_dirpath, ori_image, ori_metadata,
# aligned_new_polygons,
# model_disp_max_abs_value,
# batch_size, ds_fac_list,
# run_name_list)
# # for i in range(10):
# # aligned_new_polygons, segmentation_image = multires_pipeline.multires_inference(runs_dirpath, ori_image, ori_metadata,
# # aligned_new_polygons,
# # model_disp_max_abs_value,
# # batch_size, ds_fac_list[-1:],
# # run_name_list[-1:])
# print("# --- Save aligned new polygons--- #")
# aligned_new_polygons_filename = aligned_new_polygons_filename_format.format(output_name)
# aligned_new_polygons_filepath = os.path.join(test_output_dir, aligned_new_polygons_filename)
# np.save(aligned_new_polygons_filepath, aligned_new_polygons)
# print("# --- Save polygons plot--- #")
# plot_image_filename = polygons_image_plot_filename_format.format(output_name)
# plot_image_filepath = os.path.join(test_output_dir, plot_image_filename)
# visualization.save_plot_image_polygons(plot_image_filepath, ori_image, ori_gt_polygons, new_polygons,
# aligned_new_polygons)
#
# # --- Save polygons as shapefiles --- #
# if output_shapefiles:
# print("# --- Save polygons as shapefiles --- #")
# output_shapefile_filename = shapefile_filename_format.format(output_name, "new_polygons")
# output_shapefile_filepath = os.path.join(test_output_dir, output_shapefile_filename)
# geo_utils.save_shapefile_from_polygons(new_polygons, ori_metadata["filepath"], output_shapefile_filepath)
# output_shapefile_filename = shapefile_filename_format.format(output_name, "aligned_new_polygons")
# output_shapefile_filepath = os.path.join(test_output_dir, output_shapefile_filename)
# geo_utils.save_shapefile_from_polygons(aligned_new_polygons, ori_metadata["filepath"],
# output_shapefile_filepath)
# # --- Measure accuracies --- #
# print("# --- Measure accuracies --- #")
# accuracies_filename = accuracies_filename_format.format(output_name)
# accuracies_filepath = os.path.join(test_output_dir, accuracies_filename)
# accuracies = measure_accuracies(ori_gt_polygons, new_polygons, thresholds, accuracies_filepath)
# print("New polygons:")
# print(accuracies)
#
# accuracies_filename = accuracies_filename_format.format(output_name)
# accuracies_filepath = os.path.join(test_output_dir + ".no_align", accuracies_filename)
# integer_thresholds = [threshold for threshold in thresholds if (int(threshold) == threshold)]
# accuracies = measure_accuracies(ori_gt_polygons, aligned_new_polygons, integer_thresholds, accuracies_filepath)
# print("Aligned new polygons:")
# print(accuracies)
def main():
pass
if __name__ == '__main__':
main()
| 18,905 | 55.774775 | 130 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/model_utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import tensorflow as tf
sys.path.append("../../utils")
import tf_utils
import print_utils
# --- Params --- #
DEBUG = False
SUMMARY = False
# --- --- #
def print_debug(obj):
if DEBUG:
print_utils.print_debug(obj.__str__())
def conv_conv_pool(input_, n_filters, name="", pool=True, activation=tf.nn.elu, weight_decay=None,
dropout_keep_prob=None):
"""{Conv -> BN -> RELU}x2 -> {Pool, optional}
Args:
input_ (4-D Tensor): (batch_size, H, W, C)
n_filters (list): number of filters [int, int]
        name (str): name postfix
        pool (bool): If True, MaxPool2D
        activation: Activation function
        weight_decay: Weight decay rate
        dropout_keep_prob: Dropout keep probability (currently unused by this function)
Returns:
net: output of the Convolution operations
pool (optional): output of the max pooling operations
"""
net = input_
with tf.variable_scope("layer_{}".format(name)):
for i, F in enumerate(n_filters):
net = tf_utils.complete_conv2d(net, F, (3, 3), padding="VALID", activation=activation,
bias_init_value=-0.01,
weight_decay=weight_decay,
summary=SUMMARY)
if pool is False:
return net, None
else:
pool = tf.layers.max_pooling2d(net, (2, 2), strides=(2, 2), name="pool_{}".format(name))
return net, pool
def upsample_crop_concat(to_upsample, input_to_crop, size=(2, 2), weight_decay=None, name=None):
"""Upsample `to_upsample`, crop to match resolution of `input_to_crop` and concat the two.
Args:
input_A (4-D Tensor): (N, H, W, C)
input_to_crop (4-D Tensor): (N, 2*H + padding, 2*W + padding, C2)
size (tuple): (height_multiplier, width_multiplier) (default: (2, 2))
name (str): name of the concat operation (default: None)
Returns:
output (4-D Tensor): (N, size[0]*H, size[1]*W, 2*C2)
"""
H, W, _ = to_upsample.get_shape().as_list()[1:]
_, _, target_C = input_to_crop.get_shape().as_list()[1:]
H_multi, W_multi = size
target_H = H * H_multi
target_W = W * W_multi
upsample = tf.image.resize_bilinear(to_upsample, (target_H, target_W), name="upsample_{}".format(name))
upsample = tf_utils.complete_conv2d(upsample, target_C, (3, 3), padding="SAME", bias_init_value=-0.01,
weight_decay=weight_decay,
summary=SUMMARY)
# TODO: initialize upsample with bilinear weights
# upsample = tf.layers.conv2d_transpose(to_upsample, target_C, kernel_size=2, strides=1, padding="valid", name="deconv{}".format(name))
crop = tf.image.resize_image_with_crop_or_pad(input_to_crop, target_H, target_W)
return tf.concat([upsample, crop], axis=-1, name="concat_{}".format(name))
def upsample_crop(input, resolution, factor=(2, 2), name=None):
"""
Scales the input displacement field map by factor.
First upsamples by factor,
then crops to resolution.
:param input: Tensor to upsample and then crop
:param resolution: Output resolution (row_count, col_count)
:param factor: Factor of scaling (row_factor, col_factor)
:param name: Name of op
:return: Upsampled + cropped tensor
"""
# Upsample
up_size = (input.shape[1] * factor[0], input.shape[2] * factor[1])
input_upsampled = tf.image.resize_bilinear(input, up_size, name="upsample_{}".format(name))
# Crop
input_cropped = tf.image.resize_image_with_crop_or_pad(input_upsampled, resolution[0], resolution[1])
return input_cropped
def build_input_branch(input, feature_base_count, pool_count, name="", weight_decay=None):
res_levels = pool_count + 1
with tf.variable_scope(name):
print_debug(name)
levels = []
for res_level_index in range(res_levels):
print_debug("\tlevel {}:".format(res_level_index))
feature_count = feature_base_count * math.pow(2, res_level_index)
if res_level_index == 0:
# Add first level
conv, pool = conv_conv_pool(input, [feature_count, feature_count],
name="conv_pool_{}".format(res_level_index), weight_decay=weight_decay)
elif res_level_index < res_levels - 1:
# Add all other levels (except the last one)
level_input = levels[-1][1] # Select the previous pool
conv, pool = conv_conv_pool(level_input, [feature_count, feature_count],
name="conv_pool_{}".format(res_level_index), weight_decay=weight_decay)
elif res_level_index == res_levels - 1:
# Add last level
level_input = levels[-1][1] # Select the previous pool
conv, pool = conv_conv_pool(level_input, [feature_count, feature_count],
name="conv_pool_{}".format(res_level_index), pool=False,
weight_decay=weight_decay)
else:
print("WARNING: Should be impossible to get here!")
conv = pool = None
print_debug("\t\tconv: {}".format(conv))
print_debug("\t\tpool: {}".format(pool))
levels.append((conv, pool))
return levels
def build_common_part(branch_levels_list, feature_base_count,
name="", weight_decay=None):
"""
    Merges the input branches level by level in a U-Net fashion
:param branch_levels_list:
:param feature_base_count:
:param name:
:param weight_decay:
:return:
"""
res_levels = len(branch_levels_list[0])
with tf.variable_scope(name):
print_debug(name)
# Concat branches at each level + add conv layers
levels = []
for level_index in range(res_levels):
print_debug("\tlevel {}:".format(level_index))
concat_a_b = tf.concat([branch_levels[level_index][0] for branch_levels in branch_levels_list], axis=-1,
name="concat_a_b_{}".format(level_index))
print_debug("\t\tconcat_a_b: {}".format(concat_a_b))
feature_count = feature_base_count * math.pow(2, level_index)
concat_a_b_conv, _ = conv_conv_pool(concat_a_b, [feature_count, feature_count],
name="concat_a_b_conv{}".format(level_index), pool=False,
weight_decay=weight_decay)
print_debug("\t\tconcat_a_b_conv: {}".format(concat_a_b_conv))
levels.append(concat_a_b_conv)
return levels
def build_output_branch(input_levels, feature_base_count, name="", weight_decay=None):
with tf.variable_scope(name):
print_debug(name)
res_levels = len(input_levels)
prev_level_output = None
output_levels = []
for level_index in range(res_levels - 1, -1, -1):
print_debug("\tlevel {}:".format(level_index))
if prev_level_output is None:
# This means we are at the bottom of the "U" of the U-Net
prev_level_output = input_levels[level_index]
else:
# Now concat prev_level_output with current input level
up = upsample_crop_concat(prev_level_output, input_levels[level_index], weight_decay=weight_decay,
name="up_{}".format(level_index))
print_debug("\t\tup: {}".format(up))
feature_count = feature_base_count * math.pow(2, level_index)
final_conv, _ = conv_conv_pool(up, [feature_count, feature_count],
name="final_conv_{}".format(level_index), pool=False,
weight_decay=weight_decay)
print_debug("\t\tfinal_conv: {}".format(final_conv))
output_levels.insert(0, final_conv) # Insert at the beginning because we are iterating in reverse order
prev_level_output = final_conv
return output_levels
def build_pred_branch(input_levels, output_channels, name=""):
with tf.variable_scope(name):
print_debug(name)
output_levels = []
output_level_0 = None
level_0_resolution = None
for level_index, input in enumerate(input_levels):
print_debug("\tlevel {}:".format(level_index))
# Add prediction layer then upsample prediction to match level 0's prediction resolution
pred = tf.layers.conv2d(input, output_channels, (1, 1), name="pred_conv1x1_level_{}".format(level_index),
padding='VALID')
tf.summary.histogram("pred_{}".format(level_index), pred)
print_debug("\t\tpred: {}".format(pred))
if level_index == 0:
output_level_0 = pred
level_0_resolution = pred.get_shape().as_list()[1:3]
else:
# Upsample pred and crop to the resolution of the first level
single_factor = math.pow(2, level_index)
pred = upsample_crop(pred, level_0_resolution, (single_factor, single_factor),
name="convert_disp_pred_{}".format(level_index))
output_levels.append(pred)
stacked_output_levels = tf.stack(output_levels, axis=1, name="stacked_preds")
print_debug("\tstacked_output_levels: {}".format(stacked_output_levels))
return output_level_0, stacked_output_levels
def build_multibranch_unet(input_branch_params_list, pool_count, common_feature_base_count, output_branch_params_list,
weight_decay=None):
"""
    Builds a multi-branch U-Net network. Has len(input_branch_params_list) input branches and len(output_branch_params_list) output branches.
:param input_branch_params_list: [
{
"tensor": input Tensorflow tensor,
"name": name used in internal scope of the graph,
"feature_base_count": number of features of the first conv for the each input branch. Multiplied by 2 after each conv_conv block,
},
...
]
:param pool_count: number of 2x2 pooling operations. Results in (pool_count+1) resolution levels
:param common_feature_base_count: number of features of the first conv for the common part of the network. Multiplied by 2 after each conv_conv block
:param output_branch_params_list: [
{
"feature_base_count": like input feature_base_counts but for outputs,
"channel_count": integer for the final channel count,
"activation": final activation function,
"name": name used in internal scope of the graph,
},
...
]
:param weight_decay: (Default: None). Weight decay rate
    :return: (outputs, keep_prob) where outputs holds one (stacked_pred_logits, stacked_preds, level_0_pred) tuple per output branch and keep_prob is a dropout placeholder (not applied in this architecture)
"""
# Dropout - controls the complexity of the model, prevents co-adaptation of features.
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
# Build the separate simple convolution networks for each input:
input_branch_levels_list = []
for params in input_branch_params_list:
tf.summary.histogram("input_{}".format(params["name"]), params["tensor"])
branch_levels = build_input_branch(params["tensor"], params["feature_base_count"], pool_count,
name="branch_{}".format(params["name"]),
weight_decay=weight_decay)
input_branch_levels_list.append(branch_levels)
# Build the common part of the network, concatenating inout branches at all levels
common_part_levels = build_common_part(input_branch_levels_list,
common_feature_base_count,
name="common_part",
weight_decay=weight_decay)
# Build the splitting part of the network, each level (except the last one) finishing with output branches.
# Each branch is like the upsampling part of a U-Net
outputs = []
for params in output_branch_params_list:
branch_levels = build_output_branch(common_part_levels,
params["feature_base_count"],
name="branch_{}".format(params["name"]),
weight_decay=weight_decay)
# Add the last layers for prediction, then upsample each levels' prediction to level 0's resolution
# TODO: keep this for legacy reasons:
if params["activation"] == tf.identity:
name = "branch_{}_pred_logit".format(params["name"])
else:
name = "branch_{}_pred".format(params["name"])
level_0_pred, stacked_pred_logits = build_pred_branch(branch_levels,
output_channels=params["channel_count"],
name=name)
# Apply activation function to logits
stacked_preds = params["activation"](stacked_pred_logits)
output = (stacked_pred_logits, stacked_preds, stacked_preds[:, 0, ...])
outputs.append(output)
return outputs, keep_prob
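# Illustrative call of build_multibranch_unet() (shapes, feature counts and channel counts are
# assumptions for the sketch, not values taken from this repository's training configs):
#
#   image = tf.placeholder(tf.float32, [None, 220, 220, 3])
#   poly_map = tf.placeholder(tf.float32, [None, 220, 220, 3])
#   outputs, keep_prob = build_multibranch_unet(
#       input_branch_params_list=[
#           {"tensor": image, "name": "image", "feature_base_count": 16},
#           {"tensor": poly_map, "name": "poly_map", "feature_base_count": 16},
#       ],
#       pool_count=3,
#       common_feature_base_count=32,
#       output_branch_params_list=[
#           {"name": "disp", "feature_base_count": 32, "channel_count": 2, "activation": tf.nn.tanh},
#           {"name": "seg", "feature_base_count": 32, "channel_count": 4, "activation": tf.identity},
#       ],
#       weight_decay=None)
#   disp_logits, disp_preds, disp_pred_level_0 = outputs[0]  # one such tuple per output branch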
def build_double_unet(input_image, input_poly_map,
image_feature_base_count, poly_map_feature_base_count, common_feature_base_count, pool_count,
disp_output_channels, add_seg_output=True, seg_output_channels=1,
weight_decay=None):
"""
Build the double U-Net network. Has two input branches and two output branches (actually, each resolution level
except the last one have two output branches).
:param input_image: image
:param input_poly_map: polygon_map
:param image_feature_base_count: number of features of the first conv for the image branch. Multiplied by 2 after each conv_conv block
:param poly_map_feature_base_count: number of features of the first conv for the polygon map branch. Multiplied by 2 after each conv_conv block
:param common_feature_base_count: number of features of the first conv for the common part of the network. Multiplied by 2 after each conv_conv block
:param pool_count: number of 2x2 pooling operations. Results in (pool_count+1) resolution levels
:param disp_output_channels: Output dimension for the displacement prediction
:param add_seg_output: (Default: True). If True, a segmentation output branch is built. If False, no additional branch is built and the seg_output_channels argument is ignored.
:param seg_output_channels: Output dimension for the segmentation prediction
:param weight_decay: (Default: None). Weight decay rate
    :return: level_0_disp_pred, stacked_disp_preds, level_0_seg_pred, stacked_seg_pred_logits, keep_prob
"""
# Dropout - controls the complexity of the model, prevents co-adaptation of features.
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
tf.summary.histogram("input_image", input_image)
tf.summary.histogram("input_poly_map", input_poly_map)
# Build the two separate simple convolution networks for each input
branch_image_levels = build_input_branch(input_image, image_feature_base_count, pool_count,
name="branch_image",
weight_decay=weight_decay)
branch_poly_map_levels = build_input_branch(input_poly_map, poly_map_feature_base_count, pool_count,
name="branch_poly_map",
weight_decay=weight_decay)
# Build the common part of the network, concatenating the image and polygon map branches at all levels
common_part_levels = build_common_part([branch_image_levels, branch_poly_map_levels],
common_feature_base_count,
name="common_part",
weight_decay=weight_decay)
# Build the splitting part of the network, each level (except the last one) finishing with two branches: one for
# displacement map prediction and the other for segmentation prediction. Each branch is like the upsampling part of
# A U-Net
disp_levels = build_output_branch(common_part_levels,
common_feature_base_count,
name="branch_disp",
weight_decay=weight_decay)
if add_seg_output:
seg_levels = build_output_branch(common_part_levels,
common_feature_base_count,
name="branch_seg",
weight_decay=weight_decay)
else:
seg_levels = None
# Add the last layers for prediction, then upsample each levels' prediction to level 0's resolution
level_0_disp_pred_logit, stacked_disp_pred_logits = build_pred_branch(disp_levels,
output_channels=disp_output_channels,
name="branch_disp_pred")
level_0_disp_pred = tf.nn.tanh(level_0_disp_pred_logit)
stacked_disp_preds = tf.nn.tanh(stacked_disp_pred_logits)
if add_seg_output:
level_0_seg_pred_logit, stacked_seg_pred_logits = build_pred_branch(seg_levels,
output_channels=seg_output_channels,
name="branch_seg_pred_logit")
# Apply sigmoid to level_0_seg_pred_logit
level_0_seg_pred = tf.nn.sigmoid(level_0_seg_pred_logit)
else:
stacked_seg_pred_logits = None
level_0_seg_pred = None
return level_0_disp_pred, stacked_disp_preds, level_0_seg_pred, stacked_seg_pred_logits, keep_prob
def get_output_res(input_res, pool_count):
"""
This function has to be re-written if the model architecture changes
:param input_res:
:param pool_count:
:return:
"""
current_res = input_res
non_zero_remainder = False
# branch_image
for i in range(pool_count):
current_res -= 4 # 2 conv3x3
current_res, r = divmod(current_res, 2) # pool
non_zero_remainder = non_zero_remainder or bool(r)
current_res -= 4 # 2 conv3x3 of the last layer
# common_part
current_res -= 4 # 2 conv3x3
# branch_disp
for i in range(pool_count):
current_res *= 2 # upsample
current_res -= 4 # 2 conv3x3
if non_zero_remainder:
print(
"WARNING: a pooling operation will result in a non integer res, the network will automatically add padding there. The output of this function is not garanteed to be exact.")
return int(current_res), non_zero_remainder
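# Worked example: get_output_res(220, 3) == (100, False), i.e. a 220x220 input patch with 3 pooling
# levels yields a 100x100 output (220 -> 216 -> 108 -> 104 -> 52 -> 48 -> 24 -> 20 -> 16, then
# 32 -> 28 -> 56 -> 52 -> 104 -> 100), which matches the (220 - 100) // 2 padding used in test.py.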
def get_input_res(output_res, pool_count):
"""
This function has to be re-written if the model architecture changes
:param output_res:
:param pool_count:
:return:
"""
current_res = output_res
non_zero_remainder = False
# branch_disp
for i in range(pool_count):
current_res += 4 # 2 conv3x3
current_res, r = divmod(current_res, 2) # upsample
non_zero_remainder = non_zero_remainder or bool(r)
# common_part
current_res += 4 # 2 conv3x3
# branch_image
current_res += 4 # 2 conv3x3 of the last layer
for i in range(pool_count):
current_res *= 2 # pool
current_res += 4 # 2 conv3x3
return int(current_res), non_zero_remainder
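# Worked example: get_input_res(100, 3) == (220, False), the inverse of the example above
# (100 -> 104 -> 52 -> 56 -> 28 -> 32 -> 16 -> 20 -> 24, then 48 -> 52 -> 104 -> 108 -> 216 -> 220).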
def get_min_input_res(pool_count):
"""
Returns the minimum input resolution the network can handle.
    Because of the no-padding convolutions, the resolution of the output is smaller than the input and
    thus there is a minimum input resolution that works.
This function has to be re-written if the model architecture changes
:param pool_count:
:return:
"""
min_input_res = None
output_res = 0
non_zero_remainder = True
while non_zero_remainder:
output_res += 1
min_input_res, non_zero_remainder = get_input_res(output_res, pool_count)
return min_input_res
| 20,710 | 44.820796 | 185 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/2_test_stereo_real_displacements.py | import sys
import os
import numpy as np
import itertools
import config
import test
sys.path.append("../../../data/stereo_dataset")
import read
sys.path.append("../../utils")
import geo_utils
# --- Params --- #
DATASET_DIR = os.path.join(config.PROJECT_DIR, "../../../data/stereo_dataset")
FILE_PARAMS = {
"raw_dataset_dir": os.path.join(DATASET_DIR, "raw"),
"gt_views": ["ref", "rec"],
"image_name_suffix": "ortho",
"image_modes": ["RGB", "NIRRG"],
"image_extension": "tif",
"image_format": "{}_{}_{}_{}.{}", # To be used as IMAGE_FORMAT.format(name, image_name_suffix, gt_views[i], image_modes[j], image_extension)
"poly_name_capitalize": True, # If True, the gt name will be capitalised when building the gt filename to load
"poly_tag": "buildings",
"poly_extension": "filtered.shp", # Use filtered shapefiles (no intersecting polygons)
"poly_format": "{}_{}_{}.{}", # To be used as IMAGE_FORMAT.format(capitalize(name), POLY_TAG, GT_VIEWS[i], poly_extension)
}
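# Illustrative expansion of the format strings above for the "leibnitz" image, "ref" view, "RGB" mode
# (the actual filename assembly is done in the stereo_dataset read module):
#   image: leibnitz_ortho_ref_RGB.tif
#   polygons: Leibnitz_buildings_ref.filtered.shp (name capitalized because poly_name_capitalize is True)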
TEST_IMAGES = ["leibnitz"]
# Models
BATCH_SIZE = 6
# Assumption: same runs directory as the other test scripts in this folder use
RUNS_DIRPATH = "runs.igarss2019"
DS_FAC_LIST = [8, 4, 2] # Must be in descending order
RUN_NAME_LIST = [
"ds_fac_8",
"ds_fac_4",
"ds_fac_2",
]
assert len(DS_FAC_LIST) == len(RUN_NAME_LIST), "DS_FAC_LIST and RUN_NAME_LIST should have the same length (and match)"
MODEL_DISP_MAX_ABS_VALUE = 4
# Both list should match and be in descending order of downsampling factor.
THRESHOLDS = np.arange(0, 16.25, 0.25)
TEST_OUTPUT_DIR = "test/stereo_dataset_real_displacements.align.ds_fac_8.ds_fac_4.ds_fac_2"
# --- --- #
def test_image(runs_dirpath, image_name, view_pair, file_params, batch_size, ds_fac_list, run_name_list, model_disp_max_abs_value, thresholds, test_output_dir):
# --- Load data --- #
ori_image, ori_metadata = read.load_image_data(image_name, view_pair[0], file_params)
ori_gt_polygons, ori_gt_properties_list = read.load_polygon_data(image_name, view_pair[0], file_params)
ori_disp_polygons, ori_disp_properties_list = read.load_polygon_data(image_name, view_pair[1], file_params)
# --- Test --- #
# Add view to the image name (otherwise the result of the last view will overwrite previous ones)
test_image_name = image_name + "_" + view_pair[0] + "_" + view_pair[1]
    test.test_image_with_gt_and_disp_polygons(runs_dirpath, test_image_name, ori_image, ori_metadata, ori_gt_polygons, ori_disp_polygons, ori_disp_properties_list, batch_size, ds_fac_list, run_name_list, model_disp_max_abs_value, thresholds, test_output_dir)
def main():
if not os.path.exists(TEST_OUTPUT_DIR):
os.makedirs(TEST_OUTPUT_DIR)
view_pairs = itertools.permutations(FILE_PARAMS["gt_views"])
for image_name in TEST_IMAGES:
for view_pair in view_pairs:
            test_image(RUNS_DIRPATH, image_name, view_pair, FILE_PARAMS, BATCH_SIZE, DS_FAC_LIST, RUN_NAME_LIST, MODEL_DISP_MAX_ABS_VALUE, THRESHOLDS, TEST_OUTPUT_DIR)
if __name__ == '__main__':
main()
| 2,916 | 36.883117 | 244 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/2_test_bradbury_buildings.align_gt.py | import sys
import os
import tensorflow as tf
import numpy as np
import test
# CHANGE to the path of your own read.py script:
sys.path.append("../../../data/bradbury_buildings_roads_height_dataset")
import read as read_bradbury_buildings
sys.path.append("../../utils")
import run_utils
import python_utils
import print_utils
# --- Command-line FLAGS --- #
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('batch_size', None, "Batch size. Generally set as large as the VRAM can handle.")
# Some examples:
# On Quadro M2200, 4GB VRAM: python 2_test_aerial_image.align_gt.py --batch_size=12
# On GTX 1080 Ti, 11GB VRAM: python 2_test_aerial_image.align_gt.py --batch_size=32
# --- --- #
# --- Params --- #
# CHANGE to you own test config file:
TEST_CONFIG_NAME = "config.test.bradbury_buildings.align_gt"
# Must be in descending order:
DS_FAC_LIST = [
8,
4,
2,
1,
]
RUNS_DIRPATH = "runs.igarss2019"
RUN_NAME_LIST = ["ds_fac_{}_noisy_inria_bradbury_all_1".format(ds_fac) for ds_fac in DS_FAC_LIST]
OUTPUT_DIRNAME_EXTENTION = "." + ".".join(RUN_NAME_LIST)
INPUT_POLYGONS_FILENAME_EXTENSION = "_buildingCoord.csv" # Set to None to use default gt polygons
ALIGNED_GT_POLYGONS_FILENAME_EXTENSION = "_aligned_noisy_building_polygons_1.npy"
# --- --- #
def test_image(runs_dirpath, dataset_raw_dirpath, image_info, batch_size, ds_fac_list, run_name_list,
model_disp_max_abs_value, output_dir, output_shapefiles):
# --- Load data --- #
# CHANGE the arguments of the load_gt_data() function if using your own and it does not take the same arguments:
ori_image, ori_metadata, gt_polygons = read_bradbury_buildings.load_gt_data(dataset_raw_dirpath, image_info["city"],
image_info["number"])
    if INPUT_POLYGONS_FILENAME_EXTENSION is not None:
        gt_polygons = read_bradbury_buildings.load_polygons(dataset_raw_dirpath, image_info["city"], image_info["number"], INPUT_POLYGONS_FILENAME_EXTENSION)
if gt_polygons is not None:
# CHANGE the arguments of the IMAGE_NAME_FORMAT format string if using your own and it does not take the same arguments:
image_name = read_bradbury_buildings.IMAGE_NAME_FORMAT.format(city=image_info["city"], number=image_info["number"])
print_utils.print_info("Processing image {}".format(image_name))
aligned_gt_polygons = test.test_align_gt(runs_dirpath, ori_image, ori_metadata, gt_polygons, batch_size,
ds_fac_list, run_name_list,
model_disp_max_abs_value, output_dir, image_name,
output_shapefiles=output_shapefiles)
# Save aligned_gt_polygons in dataset dir:
aligned_gt_polygons_filepath = read_bradbury_buildings.get_polygons_filepath(dataset_raw_dirpath, image_info["city"], image_info["number"], ALIGNED_GT_POLYGONS_FILENAME_EXTENSION)
os.makedirs(os.path.dirname(aligned_gt_polygons_filepath), exist_ok=True)
np.save(aligned_gt_polygons_filepath, aligned_gt_polygons)
def main(_):
# load config file
config_test = run_utils.load_config(TEST_CONFIG_NAME)
# Handle FLAGS
if FLAGS.batch_size is not None:
batch_size = FLAGS.batch_size
else:
batch_size = config_test["batch_size"]
print("#--- Used params: ---#")
print("batch_size: {}".format(FLAGS.batch_size))
# Find data_dir
data_dir = python_utils.choose_first_existing_path(config_test["data_dir_candidates"])
if data_dir is None:
print("ERROR: Data directory not found!")
exit()
else:
print("Using data from {}".format(data_dir))
dataset_raw_dirpath = os.path.join(data_dir, config_test["dataset_raw_partial_dirpath"])
output_dir = config_test["align_dir"] + OUTPUT_DIRNAME_EXTENTION
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for images_info in config_test["images_info_list"]:
for number in images_info["numbers"]:
image_info = {
"city": images_info["city"],
"number": number,
}
test_image(RUNS_DIRPATH, dataset_raw_dirpath, image_info, batch_size, DS_FAC_LIST,
RUN_NAME_LIST, config_test["model_disp_max_abs_value"],
output_dir, config_test["output_shapefiles"])
if __name__ == '__main__':
tf.app.run(main=main)
| 4,555 | 36.344262 | 187 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/loss_utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
sys.path.append("../../utils")
import tf_utils
def displacement_error(gt, preds, level_loss_coefs, polygon_map, disp_loss_params):
"""
:param gt: Groundtruth displacement map bounded between -1 and 1. Shape [batch, height, width, channels (3)]
:param preds: Predicted displacement maps bounded between -1 and 1. Shape [batch, levels, height, width, channels (2)]
:param level_loss_coefs: Loss coefficients to apply to each level
:param polygon_map: Used as mask for fill, outline and vertex. Shape [batch, height, width, channels (3)]
:return: error
"""
height, width, _ = gt.get_shape().as_list()[1:]
with tf.name_scope("euclidean_error"):
# Compute weight mask
cropped_polygon_map = tf.image.resize_image_with_crop_or_pad(polygon_map, height, width)
# TODO: normalize correction_weights
correction_weights = 1 / (
tf.reduce_sum(tf.reduce_sum(cropped_polygon_map, axis=1), axis=1) + tf.keras.backend.epsilon())
weigths = tf.constant(
[disp_loss_params["fill_coef"], disp_loss_params["edge_coef"], disp_loss_params["vertex_coef"]],
dtype=tf.float32)
corrected_weights = weigths * correction_weights
corrected_weights = tf.expand_dims(tf.expand_dims(corrected_weights, axis=1), axis=1)
weighted_mask = tf.reduce_sum(cropped_polygon_map * corrected_weights, axis=-1)
weighted_mask = tf.expand_dims(weighted_mask, axis=1) # Add levels dimension
# Compute errors
gt = tf.expand_dims(gt, axis=1) # Add levels dimension
pixelwise_euclidean_error = tf.reduce_sum(tf.square(gt - preds), axis=-1)
masked_pixelwise_euclidean_error = pixelwise_euclidean_error * weighted_mask
# Sum errors
summed_error = tf.reduce_sum(masked_pixelwise_euclidean_error, axis=0) # Batch sum
summed_error = tf.reduce_sum(summed_error, axis=-1) # Col/Width sum
summed_error = tf.reduce_sum(summed_error, axis=-1) # Row/Height sum
summed_error = summed_error * level_loss_coefs # Apply Level loss coefficients
summed_error = tf.reduce_sum(summed_error)
# Sum weights
summed_weighted_mask = tf.reduce_sum(weighted_mask)
loss = summed_error / (summed_weighted_mask + tf.keras.backend.epsilon())
return loss
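
# Usage sketch (illustrative only; the coefficient values below are assumptions,
# not taken from the project's training configuration):
#
#   disp_loss_params = {"fill_coef": 0.1, "edge_coef": 1.0, "vertex_coef": 10.0}
#   level_loss_coefs = tf.constant([1.0, 0.5], dtype=tf.float32)  # one coefficient per pyramid level
#   loss = displacement_error(disp_gt, disp_preds, level_loss_coefs, polygon_map, disp_loss_params)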
def segmentation_error(seg_gt, seg_pred_logits, level_loss_coefs, seg_loss_params):
    """
    Weighted sigmoid cross-entropy between the groundtruth segmentation and the predicted logits.

    :param seg_gt: Groundtruth segmentation (fill, outline, vertex). Shape [batch, height, width, channels (3)]
    :param seg_pred_logits: Predicted segmentation logits, including a background channel. Shape [batch, levels, height, width, channels (4)]
    :param level_loss_coefs: Loss coefficients to apply to each level
    :param seg_loss_params: Dict with keys "background_coef", "fill_coef", "edge_coef" and "vertex_coef"
    :return: Scalar loss
    """
    # print("--- segmentation_error ---")
    _, levels, height, width, _ = seg_pred_logits.get_shape().as_list()

    # Crop seg_gt to match the resolution of seg_pred_logits
    seg_gt = tf.image.resize_image_with_crop_or_pad(seg_gt, height, width)

    # Add background class to the groundtruth segmentation
    if tf_utils.get_tf_version() == "1.4.0":
        seg_gt_bg = tf.reduce_prod(1 - seg_gt, axis=-1,
                                   keep_dims=True)  # Equals 0 if pixel is either fill, outline or vertex. Equals 1 otherwise
    else:
        seg_gt_bg = tf.reduce_prod(1 - seg_gt, axis=-1,
                                   keepdims=True)  # Equals 0 if pixel is either fill, outline or vertex. Equals 1 otherwise
    seg_gt = tf.concat([seg_gt_bg, seg_gt], axis=-1)

    # Compute weight mask
    # class_sums = tf.reduce_sum(tf.reduce_sum(seg_gt, axis=1), axis=1)
    # seg_class_balance_weights = 1 / (
    #     class_sums + tf.keras.backend.epsilon())
    seg_class_weights = tf.constant([[seg_loss_params["background_coef"], seg_loss_params["fill_coef"],
                                      seg_loss_params["edge_coef"], seg_loss_params["vertex_coef"]]],
                                    dtype=tf.float32)
    # balanced_class_weights = seg_class_balance_weights * seg_class_weights
    balanced_class_weights = seg_class_weights
    balanced_class_weights = tf.expand_dims(balanced_class_weights, axis=1)  # Add levels dimension
    balanced_class_weights = tf.tile(balanced_class_weights, multiples=[1, levels, 1])  # Repeat on levels dimension
    level_loss_coefs = tf.expand_dims(level_loss_coefs, axis=-1)  # Add channels dimension
    final_weights = balanced_class_weights * level_loss_coefs
    final_weights = tf.expand_dims(tf.expand_dims(final_weights, axis=2), axis=2)  # Add spatial dimensions

    # Adapt seg_gt shape to seg_pred_logits
    seg_gt = tf.expand_dims(seg_gt, axis=1)  # Add levels dimension
    seg_gt = tf.tile(seg_gt, multiples=[1, levels, 1, 1, 1])  # Repeat on levels dimension

    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=seg_gt, logits=seg_pred_logits)
    # Now apply the various weights
    weighted_loss = loss * final_weights
    final_loss = tf.reduce_mean(weighted_loss)
    return final_loss
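
# Usage sketch (illustrative; the class weights are assumed values). Note that the
# logits are expected to carry 4 channels (background, fill, edge, vertex) while the
# groundtruth carries only 3 -- the background channel is derived inside the function:
#
#   seg_loss_params = {"background_coef": 1.0, "fill_coef": 1.0, "edge_coef": 1.0, "vertex_coef": 1.0}
#   loss = segmentation_error(seg_gt, seg_pred_logits, level_loss_coefs, seg_loss_params)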
def laplacian_penalty(preds, level_loss_coefs):
    """
    Penalizes high-frequency variations of the predicted displacement maps by convolving
    them with a Laplacian kernel and averaging the squared response, weighted per level.

    :param preds: Predicted displacement maps. Shape [batch, levels, height, width, channels (2)]
    :param level_loss_coefs: Loss coefficients to apply to each level
    :return: Scalar penalty
    """
    in_channels = preds.shape[-1]
    with tf.name_scope("laplacian_penalty"):
        laplace_k = tf_utils.make_depthwise_kernel([[0.5, 1.0, 0.5],
                                                    [1.0, -6., 1.0],
                                                    [0.5, 1.0, 0.5]], in_channels)
        # Reshape preds to respect the input format of the depthwise_conv2d op
        shape = [preds.shape[0] * preds.shape[1]] + preds.get_shape().as_list()[2:]
        reshaped_preds = tf.reshape(preds, shape)
        laplacians = tf.nn.depthwise_conv2d(reshaped_preds, laplace_k, [1, 1, 1, 1], padding='SAME')
        penalty_map = tf.reduce_sum(tf.square(laplacians), axis=-1)
        # Reshape penalty_map to a shape compatible with preds
        shape = preds.get_shape().as_list()[:-1]
        reshaped_penalty_map = tf.reshape(penalty_map, shape)
        # Compute mean penalty per level over spatial dimensions as well as over batches
        level_penalties = tf.reduce_mean(reshaped_penalty_map, axis=0)  # Batch mean
        level_penalties = tf.reduce_mean(level_penalties, axis=-1)  # Col/Width mean
        level_penalties = tf.reduce_mean(level_penalties, axis=-1)  # Row/Height mean
        # Apply level_loss_coefs
        weighted_penalties = level_penalties * level_loss_coefs
        penalty = tf.reduce_mean(weighted_penalties)  # Levels mean
    return penalty
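
# Note on the kernel above: its entries sum to zero, so a spatially constant
# displacement field yields (approximately) zero penalty -- only local curvature of
# the predicted field is penalized. A minimal check, assuming the placeholder shapes
# used in main() below:
#
#   flat_preds = np.zeros([1, 2, 3, 3, 2], dtype=np.float32)
#   # feeding flat_preds for `disps` should yield a penalty of ~0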
def main(_):
    batch_size = 1
    levels = 2
    patch_inner_res = 3
    patch_outer_res = 5

    disp_ = tf.placeholder(tf.float32, [batch_size, patch_inner_res, patch_inner_res, 2])
    disps = tf.placeholder(tf.float32, [batch_size, levels, patch_inner_res, patch_inner_res, 2])
    seg_ = tf.placeholder(tf.float32, [batch_size, patch_inner_res, patch_inner_res, 3])
    # 4 channels: background + (fill, edge, vertex), matching the background class added in segmentation_error()
    seg_logits = tf.placeholder(tf.float32, [batch_size, levels, patch_inner_res, patch_inner_res, 4])
    level_loss_coefs = tf.placeholder(tf.float32, [levels])
    mask = tf.placeholder(tf.float32, [batch_size, patch_outer_res, patch_outer_res, 3])

    # Loss parameters for this self-test (arbitrary example values, not the training configuration):
    disp_loss_params = {"fill_coef": 1.0, "edge_coef": 1.0, "vertex_coef": 1.0}
    seg_loss_params = {"background_coef": 1.0, "fill_coef": 1.0, "edge_coef": 1.0, "vertex_coef": 1.0}

    disp_loss = displacement_error(disp_, disps, level_loss_coefs, mask, disp_loss_params)
    seg_loss = segmentation_error(seg_, seg_logits, level_loss_coefs, seg_loss_params)
    penalty = laplacian_penalty(disps, level_loss_coefs)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    with tf.Session() as sess:
        sess.run(init_op)

        disp_gt = np.zeros([batch_size, patch_inner_res, patch_inner_res, 2])
        disp_gt[0, 0, 0, 0] = 1
        disp_preds = np.zeros([batch_size, levels, patch_inner_res, patch_inner_res, 2])
        disp_preds[0, 0, 0, 0, 0] = 1
        disp_preds[0, 1, 0, 0, 0] = 1

        seg_gt = np.zeros([batch_size, patch_inner_res, patch_inner_res, 3])
        # seg_gt += 0.5
        seg_gt[0, 0, 0, 0] = 1.0
        seg_gt[0, 0, 1, 1] = 1.0
        seg_gt[0, 0, 2, 2] = 1.0
        seg_gt[0, 1, 0, 0] = 1.0
        seg_gt[0, 1, 1, 1] = 1.0
        seg_gt[0, 1, 2, 2] = 1.0

        # Predicted logits with 4 channels (channel 0 is the background class)
        seg_pred_logits = np.zeros([batch_size, levels, patch_inner_res, patch_inner_res, 4])
        seg_pred_logits += -100
        seg_pred_logits[0, 0, 0, 0, 0] = 100
        seg_pred_logits[0, 0, 0, 1, 1] = 100
        seg_pred_logits[0, 0, 0, 2, 2] = -100
        seg_pred_logits[0, 1, 0, 0, 0] = 100
        seg_pred_logits[0, 1, 0, 1, 1] = 100
        seg_pred_logits[0, 1, 0, 2, 2] = -100
        seg_pred_logits[0, 0, 1, 0, 0] = 100
        seg_pred_logits[0, 0, 1, 1, 1] = 100
        seg_pred_logits[0, 0, 1, 2, 2] = -100
        seg_pred_logits[0, 1, 1, 0, 0] = 100
        seg_pred_logits[0, 1, 1, 1, 1] = 100
        seg_pred_logits[0, 1, 1, 2, 2] = -100

        coefs = np.array([1, 0.5])
        poly_mask = np.zeros([batch_size, patch_outer_res, patch_outer_res, 3])
        poly_mask[0, 1, 1, 0] = 1

        computed_disp_loss, computed_seg_loss, computed_penalty = sess.run(
            [disp_loss, seg_loss, penalty], feed_dict={disp_: disp_gt, disps: disp_preds,
                                                       seg_: seg_gt, seg_logits: seg_pred_logits,
                                                       level_loss_coefs: coefs, mask: poly_mask})
        print("computed_disp_loss:")
        print(computed_disp_loss)
        print("computed_seg_loss:")
        print(computed_seg_loss)
        print("computed_penalty:")
        print(computed_penalty)


if __name__ == '__main__':
    tf.app.run(main=main)
| 9,420 | 44.73301 | 125 | py |