content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses 1, value) |
---|---|---|---|---|---|---|---|---|
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r"^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/key-file-upload/$",
views.KeyFileUploadView.as_view(),
name="key_file_upload",
),
]
| 19.076923 | 82 | 0.58871 | ["MIT"] | ayanginet/pretix-attestation-placeholder-plugin | pretix_attestation_plugin/urls.py | 248 | Python |
import time
from check_lang import check_py,check_rb,check_j,check_c,check_cpp
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import subprocess
import json
from json import JSONEncoder
from main import predict
app = Flask(__name__)
CORS(app)
@app.route("/")
def hello():
return '<form action="/check" method="POST"><input name="code" size="135"><input type="submit" value="Code Here"></form>'
@app.route("/check", methods=['POST'])
def echo():
codes = []
filename = str(int(time.time()))
dataDict = json.loads(request.data)
# print dataDict
# print "------------"
with open('code/'+filename,'w+') as outfile:
outfile.write(str(dataDict['sc']))
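# Run each language checker on the saved snippet; a checker returning 0 means the
# snippet compiled/ran under that language. Order: C, C++, Python, Ruby, then a
# constant 1 as a placeholder for Java (no runtime check is performed for Java).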
codes.append(int(check_c("code/"+filename)))
codes.append(int(check_cpp("code/"+filename)))
codes.append(int(check_py("code/"+filename)))
codes.append(int(check_rb("code/"+filename)))
codes.append(1)
print(codes)
zero = 0
count = 0
correct_count = 0
for code in codes:
count = count+1
if code==0:
zero = zero + 1
correct_count = count
print(zero)
if(zero == 1):
if(correct_count==1):
jsonString = {'cpp': 0.0, 'ruby': 0.0, 'c': 1.0, 'py': 0.0, 'java': 0.0}
return jsonify(jsonString)
elif(correct_count==2):
jsonString = {'cpp': 1.0, 'ruby': 0.0, 'c': 0.0, 'py': 0.0, 'java': 0.0}
return jsonify(jsonString)
elif(correct_count==3):
jsonString = {'cpp': 0.0, 'ruby': 0.0, 'c': 0.0, 'py': 1.0, 'java': 0.0}
return jsonify(jsonString)
elif(correct_count==4):
jsonString = {'cpp': 0.0, 'ruby': 1.0, 'c': 0.0, 'py': 0.0, 'java': 0.0}
return jsonify(jsonString)
else:
x = predict(dataDict['sc'])
print(x)
# return JSONEncoder().encode(x)
return jsonify({'cpp': round(x['cpp'], 2), 'ruby': round(x['ruby'], 2), 'c': round(x['c'], 2), 'py': round(x['py'], 2), 'java': round(x['java'], 2)})
#if the cpp score is equal to or greater than 0.5, run the code to check it: if it runs, classify as cpp, else java
# sa = []
# score_cpp = x['cpp']
# score_ruby = x['ruby']
# score_c = x['c']
# score_py = x['py']
# score_java = x['java']
#
# sa.append(score_c)
# sa.append(score_cpp)
# sa.append(score_java)
# sa.append(score_py)
# sa.append(score_ruby)
#
# print sa
# return ''.join([str(code) for code in codes])+" "+str(x)
if __name__ == "__main__":
app.run(host= '0.0.0.0')
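# Example client call (an illustrative sketch, not part of the original service):
#   import requests
#   requests.post("http://localhost:5000/check", data=json.dumps({"sc": "print('hi')"}))
# The /check handler reads request.data directly and expects a JSON body with an
# 'sc' key holding the source code to classify.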
| 28.473118 | 157 | 0.554003 | ["MIT", "Unlicense"] | poke19962008/Source-Code-Classifier | server/home.py | 2,648 | Python |
import numpy as np
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class InternalEnergyEquation(Calculus, SetAxisLimit, Tools, Errors, object):
def __init__(self, filename, ig, fext, intc, tke_diss, data_prefix):
super(InternalEnergyEquation, self).__init__(ig)
# load data to structured array
eht = self.customLoad(filename)
# load grid
xzn0 = self.getRAdata(eht, 'xzn0')
nx = self.getRAdata(eht, 'nx')
# pick equation-specific Reynolds-averaged mean fields according to:
# https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
dd = self.getRAdata(eht, 'dd')[intc]
ux = self.getRAdata(eht, 'ux')[intc]
pp = self.getRAdata(eht, 'pp')[intc]
ddux = self.getRAdata(eht, 'ddux')[intc]
ddei = self.getRAdata(eht, 'ddei')[intc]
ddeiux = self.getRAdata(eht, 'ddeiux')[intc]
divu = self.getRAdata(eht, 'divu')[intc]
ppdivu = self.getRAdata(eht, 'ppdivu')[intc]
ddenuc1 = self.getRAdata(eht, 'ddenuc1')[intc]
ddenuc2 = self.getRAdata(eht, 'ddenuc2')[intc]
# store time series for time derivatives
t_timec = self.getRAdata(eht, 'timec')
t_dd = self.getRAdata(eht, 'dd')
t_ddei = self.getRAdata(eht, 'ddei')
t_fht_ei = t_ddei / t_dd
# construct equation-specific mean fields
fht_ux = ddux / dd
fht_ei = ddei / dd
fei = ddeiux - ddux * ddei / dd
##########################
# INTERNAL ENERGY EQUATION
##########################
# LHS -dq/dt
self.minus_dt_dd_fht_ei = -self.dt(t_dd * t_fht_ei, xzn0, t_timec, intc)
# LHS -div dd fht_ux fht_ei
self.minus_div_dd_fht_ux_fht_ei = -self.Div(dd * fht_ux * fht_ei, xzn0)
# RHS -div fei
self.minus_div_fei = -self.Div(fei, xzn0)
# RHS -div ftt (not included) heat flux
self.minus_div_ftt = -np.zeros(nx)
# RHS -P d = - pp Div ux
self.minus_pp_div_ux = -pp * self.Div(ux, xzn0)
# RHS -Wp = -eht_ppf_df
self.minus_eht_ppf_df = -(ppdivu - pp * divu)
# RHS source + dd enuc
self.plus_dd_fht_enuc = ddenuc1 + ddenuc2
# RHS dissipated turbulent kinetic energy
self.plus_disstke = +tke_diss
# -res
self.minus_resEiEquation = -(self.minus_dt_dd_fht_ei + self.minus_div_dd_fht_ux_fht_ei +
self.minus_div_fei + self.minus_div_ftt + self.minus_pp_div_ux + self.minus_eht_ppf_df +
self.plus_dd_fht_enuc + self.plus_disstke)
##############################
# END INTERNAL ENERGY EQUATION
##############################
# assign global data to be shared across whole class
self.data_prefix = data_prefix
self.xzn0 = xzn0
self.fht_ei = fht_ei
self.fext = fext
def plot_ei(self, LAXIS, bconv, tconv, xbl, xbr, ybu, ybd, ilg):
"""Plot mean Favrian internal energy stratification in the model"""
if self.ig != 1 and self.ig != 2:
print("ERROR(InternalEnergyEquation.py):" + self.errorGeometry(self.ig))
sys.exit()
# load x GRID
grd1 = self.xzn0
# load DATA to plot
plt1 = self.fht_ei
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [plt1]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title(r'internal energy')
plt.plot(grd1, plt1, color='brown', label=r'$\widetilde{\varepsilon}_I$')
# convective boundary markers
plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"$\widetilde{\varepsilon}_I$ (erg g$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"$\widetilde{\varepsilon}_I$ (erg g$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 18})
# display PLOT
plt.show(block=False)
# save PLOT
if self.fext == 'png':
plt.savefig('RESULTS/' + self.data_prefix + 'mean_ei.png')
elif self.fext == 'eps':
plt.savefig('RESULTS/' + self.data_prefix + 'mean_ei.eps')
def plot_ei_equation(self, LAXIS, bconv, tconv, xbl, xbr, ybu, ybd, ilg):
"""Plot internal energy equation in the model"""
if self.ig != 1 and self.ig != 2:
print("ERROR(InternalEnergyEquation.py):" + self.errorGeometry(self.ig))
sys.exit()
# load x GRID
grd1 = self.xzn0
lhs0 = self.minus_dt_dd_fht_ei
lhs1 = self.minus_div_dd_fht_ux_fht_ei
rhs0 = self.minus_div_fei
rhs1 = self.minus_div_ftt
rhs2 = self.minus_pp_div_ux
rhs3 = self.minus_eht_ppf_df
rhs4 = self.plus_dd_fht_enuc
rhs5 = self.plus_disstke
res = self.minus_resEiEquation
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [lhs0, lhs1, rhs0, rhs1, rhs2, rhs3, rhs4, rhs5, res]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title('internal energy equation')
if self.ig == 1:
plt.plot(grd1, lhs0, color='#FF6EB4', label=r"$-\partial_t (\overline{\rho} \widetilde{\epsilon}_I )$")
plt.plot(grd1, lhs1, color='k', label=r"$-\nabla_x (\overline{\rho}\widetilde{u}_x \widetilde{\epsilon}_I$)")
plt.plot(grd1, rhs0, color='#FF8C00', label=r"$-\nabla_x f_I $")
plt.plot(grd1, rhs1, color='c', label=r"$-\nabla_x f_T$ (not incl.)")
plt.plot(grd1, rhs2, color='#802A2A', label=r"$-\bar{P} \bar{d}$")
plt.plot(grd1, rhs3, color='r', label=r"$-W_P$")
plt.plot(grd1, rhs4, color='b', label=r"$+\overline{\rho}\widetilde{\epsilon}_{nuc}$")
plt.plot(grd1, rhs5, color='m', label=r"$+\varepsilon_k$")
plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N_\epsilon$")
elif self.ig == 2:
plt.plot(grd1, lhs0, color='#FF6EB4', label=r"$-\partial_t (\overline{\rho} \widetilde{\epsilon}_I )$")
plt.plot(grd1, lhs1, color='k', label=r"$-\nabla_r (\overline{\rho}\widetilde{u}_r \widetilde{\epsilon}_I$)")
plt.plot(grd1, rhs0, color='#FF8C00', label=r"$-\nabla_r f_I $")
plt.plot(grd1, rhs1, color='c', label=r"$-\nabla_r f_T$ (not incl.)")
plt.plot(grd1, rhs2, color='#802A2A', label=r"$-\bar{P} \bar{d}$")
plt.plot(grd1, rhs3, color='r', label=r"$-W_P$")
plt.plot(grd1, rhs4, color='b', label=r"$+\overline{\rho}\widetilde{\epsilon}_{nuc}$")
plt.plot(grd1, rhs5, color='m', label=r"$+\varepsilon_k$")
plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N_\epsilon$")
# convective boundary markers
plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"erg cm$^{-3}$ s$^{-1}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"erg cm$^{-3}$ s$^{-1}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 10}, ncol=2)
# display PLOT
plt.show(block=False)
# save PLOT
if self.fext == 'png':
plt.savefig('RESULTS/' + self.data_prefix + 'ei_eq.png')
elif self.fext == 'eps':
plt.savefig('RESULTS/' + self.data_prefix + 'ei_eq.eps')
| 36.874477 | 125 | 0.572336 | ["BSD-2-Clause"] | mmicromegas/ransX | EQUATIONS/InternalEnergyEquation.py | 8,813 | Python |
#-----------------------------------------------------------------------------
# Copyright (c) 2015-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# netCDF4 (tested with v.1.1.9) has some hidden imports
hiddenimports = ['netCDF4.utils', 'netcdftime']
| 39.538462 | 78 | 0.529183 | ["MIT"] | JohnWJackson/arcadePython | venv/Lib/site-packages/PyInstaller/hooks/hook-netCDF4.py | 514 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) Ostap developers.
# =============================================================================
# @file test_fitting_efficiency.py
# Test module for ostap/fitting/efficiency.py
# =============================================================================
""" Test module for ostap/fitting/efficiency.py
"""
# =============================================================================
__author__ = "Ostap developers"
__all__ = () ## nothing to import
# =============================================================================
import ROOT, random, math
import ostap.fitting.roofit
import ostap.fitting.models as Models
from ostap.core.core import cpp, VE, dsID, Ostap
from ostap.logger.utils import rooSilent
from ostap.fitting.efficiency import Efficiency1D
from ostap.utils.timing import timing
from ostap.plotting.canvas import use_canvas
from ostap.utils.utils import wait
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ or '__builtin__' == __name__ :
logger = getLogger ( 'test_fitting_efficiency' )
else :
logger = getLogger ( __name__ )
# =============================================================================
## make
x = ROOT.RooRealVar ( 'x', 'test' , 0 , 10 )
xmin , xmax = x.minmax()
acc = ROOT.RooCategory( 'cut','cut')
acc.defineType('accept',1)
acc.defineType('reject',0)
varset = ROOT.RooArgSet ( x , acc )
ds = ROOT.RooDataSet ( dsID() , 'test data' , varset )
eff0 = Models.Monotonic_pdf ( 'E0' , xvar = x , power = 3 , increasing = True )
eff0.phis = 3.1415/1 , 3.1415/2 , 3.1415/3
margin = 1.25
emax = margin * eff0 ( x.getMax() )
N = 20000
for i in range ( N ) :
xv = random.uniform ( xmin , xmax )
x.setVal ( xv )
ev = random.uniform ( 0 , emax )
if eff0( xv ) > ev : acc.setIndex(1)
else : acc.setIndex(0)
ds.add ( varset )
np = 20
dx = (xmax-xmin)/np
points = [ dx * i for i in range ( np + 1 ) ]
# =================================================================================
## make comparison table
def make_table ( func , title , prefix = "# ") :
rows = [ ( 'x' , 'fitted eff [%]' , 'true eff [%]' , 'delta [%]' ) ]
for p in points :
e1 = 100 * func ( p , error = True )
e2 = 100 * eff0 ( p ) / emax
d = e1 - e2
row = "%4.2f" % p , \
"%s" % e1.toString ( '(%5.2f+-%4.2f)' ) ,\
"%.2f" % e2 ,\
"%s" % d .toString ( '(%5.2f+-%4.2f)' )
rows.append ( row )
from ostap.logger.table import table
return table ( rows , title = title , prefix = prefix )
# =============================================================================
# use some PDF to parameterize efficiency
def test_pdf () :
logger = getLogger ( 'test_pdf' )
effPdf = Models.Monotonic_pdf ( 'P6' , xvar = x , power = 4 , increasing = True )
maxe = margin * effPdf ( xmax )
s0 = min ( 1.0 / emax , 1.0 / maxe )
scale = ROOT.RooRealVar ( 'scaleX' , 'scaleX' , s0 , 0.2 * s0 , 5.0 * s0 )
eff2 = Efficiency1D ( 'E2' , effPdf , cut = acc , scale = scale )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-Monotonic_pdf \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using Monotonic_pdf)\n%s" % make_table (
eff2 , title = 'using Monotonic_pdf') )
with wait ( 2 ) , use_canvas ( 'test_pdf' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
# use some functions to parameterize efficiency
def test_vars1 () :
from ostap.fitting.roofuncs import BernsteinPoly as BP
logger = getLogger ( 'test_vars1' )
f = BP ( 'G' , xvar = x , power = 4 )
f.pars = 0.2 , 0.2 , 0.2 , 0.2
eff2 = Efficiency1D ( 'E3' , f.fun , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-BernsteinPoly \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using BernsteinPoly)\n%s" % make_table (
eff2 , title = 'using BernsteinPoly') )
with wait ( 2 ) , use_canvas ( 'test_pdf' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
# use some functions to parameterize efficiency
def test_vars2 () :
logger = getLogger ( 'test_vars2' )
from ostap.fitting.roofuncs import MonotonicPoly as MP
f = MP ( 'G' , xvar = x , increasing = True , power = 4 )
f.pars = 0.6 , 0.8 , -0.1 , -0.6
f.a = 0.06
f.b = 2.72
f.a.release ()
f.b.release ()
eff2 = Efficiency1D ( 'E4' , f , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-MonotonicPoly \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using MonotonicPoly)\n%s" % make_table (
eff2 , title = 'using MonotonicPoly') )
with wait ( 2 ) , use_canvas ( 'test_pdf' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
# use some functions to parameterize efficiency
def test_vars3 () :
logger = getLogger ( 'test_vars3' )
a = ROOT.RooRealVar ( 'A', 'a' , 0.05 , 0 , 1 )
b = ROOT.RooRealVar ( 'B', 'b' , 0.02 , -0.05 , 0.1 )
c = ROOT.RooRealVar ( 'C', 'c' , 0.005 , 0 , 0.1 )
import ostap.fitting.roofuncs as R
from ostap.fitting.funbasic import Fun1D
X = Fun1D ( x , xvar = x , name = 'X' )
##F = (X**2) * c + X * b + a
F = a + b * X + c * X**2
eff2 = Efficiency1D ( 'E5' , F , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-Fun1D \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using Fun1D)\n%s" % make_table (
eff2 , title = 'using Fun1D') )
with wait ( 2 ) , use_canvas ( 'test_vars3' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
if '__main__' == __name__ :
with timing ("PDF" , logger ) :
test_pdf ()
with timing ("Vars1" , logger ) :
test_vars1 ()
with timing ("Vars2" , logger ) :
test_vars2 ()
with timing ("Vars3" , logger ) :
test_vars3 ()
# =============================================================================
## The END
# =============================================================================
| 34.657143 | 89 | 0.436796 | ["BSD-3-Clause"] | MohamedElashri/ostap | ostap/fitting/tests/test_fitting_efficiency.py | 7,278 | Python |
#
# @file TestConstraint_newSetters.py
# @brief Constraint unit tests for new set function API
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# $Id$
# $HeadURL$
#
# This test file was converted from src/sbml/test/TestConstraint_newSetters.c
# with the help of conversion script (ctest_converter.pl).
#
#<!---------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2009 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
#--------------------------------------------------------------------------->*/
import sys
import unittest
import libsbml
class TestConstraint_newSetters(unittest.TestCase):
C = None
def setUp(self):
self.C = libsbml.Constraint(2,4)
if (self.C == None):
pass
pass
def tearDown(self):
self.C = None
pass
def test_Constraint_setMath1(self):
math = libsbml.parseFormula("2 * k")
i = self.C.setMath(math)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.getMath() != math )
self.assertEqual( True, self.C.isSetMath() )
i = self.C.setMath(None)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.getMath() == None )
self.assertEqual( False, self.C.isSetMath() )
math = None
pass
def test_Constraint_setMath2(self):
math = libsbml.ASTNode(libsbml.AST_TIMES)
i = self.C.setMath(math)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assertEqual( False, self.C.isSetMath() )
math = None
pass
def test_Constraint_setMessage1(self):
node = libsbml.XMLNode()
i = self.C.setMessage(node)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assert_( self.C.isSetMessage() == False )
i = self.C.unsetMessage()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.C.isSetMessage() )
if (self.C.getMessage() != None):
pass
node = None
pass
def test_Constraint_setMessage2(self):
text = libsbml.XMLNode.convertStringToXMLNode(" Some text ",None)
triple = libsbml.XMLTriple("p", "http://www.w3.org/1999/xhtml", "")
att = libsbml.XMLAttributes()
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.w3.org/1999/xhtml", "")
p = libsbml.XMLNode(triple,att,xmlns)
p.addChild(text)
triple1 = libsbml.XMLTriple("message", "", "")
att1 = libsbml.XMLAttributes()
node = libsbml.XMLNode(triple1,att1)
node.addChild(p)
i = self.C.setMessage(node)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( self.C.isSetMessage() == True )
i = self.C.unsetMessage()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.C.isSetMessage() )
if (self.C.getMessage() != None):
pass
node = None
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestConstraint_newSetters))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| 32.947368 | 79 | 0.660011 | ["BSD-3-Clause"] | dchandran/evolvenetworks | external/sbml/bindings/python/test/sbml/TestConstraint_newSetters.py | 3,756 | Python |
#!/usr/bin/env python3
import sys
import os
import argparse
scriptpath = os.path.abspath(os.path.dirname(__file__))
includepath = os.path.dirname(scriptpath)
sys.path.insert(0, includepath)
from audio.audiofilefactory import AudioFileFactory
from audio.audioconversionservice import AudioConversionService
from filesystem.filelist import FileList
parser = argparse.ArgumentParser(description="Convert music files", epilog="File types are auto-derived from the filename extensions.")
parser.add_argument("source_path", help="The source path")
parser.add_argument("destination_path", help="The destination path")
parser.add_argument("list_of_favourites", help="The list of favourites")
args = parser.parse_args()
source_path = args.source_path
destination_path = args.destination_path
list_of_favourites = args.list_of_favourites
with open(list_of_favourites) as f:
content = f.readlines()
content = [x.strip() for x in content]
factory = AudioFileFactory()
for favourite in content:
statvfs = os.statvfs(destination_path)
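# Estimate the free space on the destination filesystem:
# available blocks (f_bavail) times the block size (f_bsize), in bytes.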
free_space = statvfs.f_bavail * statvfs.f_bsize
print("Space left: " + str(free_space / 1024 / 1024 / 1024) + " Gb")
if free_space < 700 * 1024 * 1024:
print("Skipping " + favourite + ", less than 700 Mb left on device (" + str(free_space / 1024 / 1024) + " Mb)")
continue
target_dir = os.path.join(destination_path, favourite)
if os.path.isdir(target_dir):
print("Skipping " + favourite + ", path already exists")
continue
os.mkdir(target_dir)
file_list = FileList(None, factory)  # renamed from 'list' to avoid shadowing the built-in
file_list.add_path_to_list(os.path.join(source_path, favourite))
for f in file_list:
source_file_path = f.get_path()
destination_file_path = os.path.join(target_dir, os.path.splitext(os.path.basename(source_file_path))[0] + ".wav")
destination_file = factory.create_file(destination_file_path)
AudioConversionService().convert_audio_file(f, destination_file)
| 43.295455 | 135 | 0.76378 | ["BSD-3-Clause"] | redmond-penguin/musicplayer | bin/convertfavourites.py | 1,905 | Python |
from pymavlink import mavutil
import sys  # needed below when printing BAD_DATA payloads
#import time
mavutil.set_dialect("video_monitor")
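# Select the custom 'video_monitor' MAVLink dialect so that the
# VIDEO_MONITOR message received below can be decoded.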
# create a connection to FMU
hoverGames = mavutil.mavlink_connection("/dev/ttymxc2", baud=921600)
# wait for the heartbeat message to find the system id
hoverGames.wait_heartbeat()
print("Heartbeat from system (system %u component %u)" %(hoverGames.target_system, hoverGames.target_component))
while (True) :
msg = hoverGames.recv_match(type='VIDEO_MONITOR', blocking=True)
#check that the message is valid before attempting to use it
if not msg:
print('No message!\n')
continue
if msg.get_type() == "BAD_DATA":
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
else:
#Message is valid, so use the attribute
print('Info: %s' % msg.info)
print('Latitude : %d' % msg.lat)
print('Longitude: %d' % msg.lon)
print('No.people: %d' % msg.no_people)
print('Confidence: %f' % msg.confidence)
print('\n')
#time.sleep(1.0)
| 30.742857 | 112 | 0.643123 | ["BSD-3-Clause"] | dmdobrea/HoverGames_Challenge2 | 02_commCustom/receiveCustomMavlinkMSG.py | 1,076 | Python |
import numpy as np
from .utils import make_dir
class Evaluater(object):
def __init__(self, logger, size, original_size, tag='paper_figure'):
self.pixel_spaceing = 0.1
self.tag = tag
make_dir(tag)
self.tag += '/'
self.logger = logger
self.scale_rate_y = original_size[0] / size[0]
self.scale_rate_x = original_size[1] / size[1]
self.RE_list = list()
self.recall_radius = [2, 2.5, 3, 4] # 2mm etc
self.recall_rate = list()
self.Attack_RE_list = list()
self.Defend_RE_list = list()
self.dict_Attack = dict()
self.dict_Defend = dict()
self.total_list = dict()
self.mode_list = [0, 1, 2, 3]
self.mode_dict = {0: "Iterative FGSM", 1: "Adaptive Iterative FGSM", \
2: "Adaptive_Rate", 3: "Proposed"}
for mode in self.mode_list:
self.dict_Defend[mode] = dict()
self.dict_Attack[mode] = dict()
self.total_list[mode] = list()
self.best_mre = 100.0
def reset(self):
self.RE_list.clear()
for mode in self.mode_list:
self.dict_Defend[mode] = dict()
self.dict_Attack[mode] = dict()
self.total_list[mode] = list()
self.Attack_RE_list.clear()
self.Defend_RE_list.clear()
def record(self, pred, landmark):
# n = batchsize = 1
# pred : list[ c(y) ; c(x) ]
# landmark: list [ (x , y) * c]
c = pred[0].shape[0]
diff = np.zeros([c, 2], dtype=float) # y, x
for i in range(c):
diff[i][0] = abs(pred[0][i] - landmark[i][1]) * self.scale_rate_y
diff[i][1] = abs(pred[1][i] - landmark[i][0]) * self.scale_rate_x
Radial_Error = np.sqrt(np.power(diff[:, 0], 2) + np.power(diff[:, 1], 2))
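# Convert the per-landmark radial error from pixels (rescaled to the original
# image resolution above) to millimetres via the 0.1 mm pixel spacing.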
Radial_Error *= self.pixel_spaceing
self.RE_list.append(Radial_Error)
# for i in range(len(Radial_Error)):
# if Radial_Error[i] > 10:
# print("Landmark {} RE {}".format(i, Radial_Error[i]))
# if Radial_Error.max() > 10:
# return Radial_Error.argmax()
return None
def record_attack(self, pred, landmark, attack_list, mode=0, iteration=0):
# n = batchsize = 1
# pred : list[ c(y) ; c(x) ]
# landmark: list [ (x , y) * c]
assert (mode in [0, 1, 2, 3])
c = pred[0].shape[0]
diff = np.zeros([c, 2], dtype=float) # y, x
attack_temp = list()
defend_temp = list()
for i in range(c):
diff[i][0] = abs(pred[0][i] - landmark[i][1]) * self.scale_rate_y
diff[i][1] = abs(pred[1][i] - landmark[i][0]) * self.scale_rate_x
Radial_Error = np.sqrt(np.power(diff[i, 0], 2) + np.power(diff[i, 1], 2))
if i in attack_list:
attack_temp.append([i, Radial_Error * self.pixel_spaceing])
else:
defend_temp.append([i, Radial_Error * self.pixel_spaceing])
if iteration not in self.dict_Attack[mode].keys():
self.dict_Attack[mode][iteration] = list()
self.dict_Attack[mode][iteration].append(attack_temp)
if iteration not in self.dict_Defend[mode].keys():
self.dict_Defend[mode][iteration] = list()
self.dict_Defend[mode][iteration].append(defend_temp)
def cal_metrics(self, ex=False):
# calculate MRE SDR
temp = np.array(self.RE_list)
Mean_RE_channel = temp.mean(axis=0)
self.logger.info(Mean_RE_channel)
# with open('./tmp/results.csv', 'w') as f:
# writer = csv.writer(f)
# writer.writerow(Mean_RE_channel.tolist())
mre = Mean_RE_channel.mean()
self.logger.info("ALL MRE {}".format(mre))
for radius in self.recall_radius:
total = temp.size
shot = (temp < radius).sum()
self.logger.info("ALL SDR {}mm {}".format\
(radius, shot * 100 / total))
if ex:
return mre, None
return mre
| 37.300885 | 86 | 0.530724 | ["Apache-2.0"] | Curli-quan/oneshot-medical-landmark | utils/eval.py | 4,215 | Python |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from tempest.common import compute
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import api_version_request
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
tempest.test.BaseTestCase):
"""Base test case class for all Compute API tests."""
force_tenant_isolation = False
# Set this to True in subclasses to create a default network. See
# https://bugs.launchpad.net/tempest/+bug/1844568
create_default_network = False
# TODO(andreaf) We should care also for the alt_manager here
# but only once client lazy load in the manager is done
credentials = ['primary']
@classmethod
def skip_checks(cls):
super(BaseV2ComputeTest, cls).skip_checks()
if not CONF.service_available.nova:
raise cls.skipException("Nova is not available")
api_version_utils.check_skip_with_microversion(
cls.min_microversion, cls.max_microversion,
CONF.compute.min_microversion, CONF.compute.max_microversion)
api_version_utils.check_skip_with_microversion(
cls.volume_min_microversion, cls.volume_max_microversion,
CONF.volume.min_microversion, CONF.volume.max_microversion)
api_version_utils.check_skip_with_microversion(
cls.placement_min_microversion, cls.placement_max_microversion,
CONF.placement.min_microversion, CONF.placement.max_microversion)
@classmethod
def setup_credentials(cls):
# Setting network=True, subnet=True creates a default network
cls.set_network_resources(
network=cls.create_default_network,
subnet=cls.create_default_network)
super(BaseV2ComputeTest, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(BaseV2ComputeTest, cls).setup_clients()
cls.servers_client = cls.os_primary.servers_client
cls.server_groups_client = cls.os_primary.server_groups_client
cls.flavors_client = cls.os_primary.flavors_client
cls.compute_images_client = cls.os_primary.compute_images_client
cls.extensions_client = cls.os_primary.extensions_client
cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client
cls.floating_ips_client = cls.os_primary.compute_floating_ips_client
cls.keypairs_client = cls.os_primary.keypairs_client
cls.security_group_rules_client = (
cls.os_primary.compute_security_group_rules_client)
cls.security_groups_client =\
cls.os_primary.compute_security_groups_client
cls.quotas_client = cls.os_primary.quotas_client
cls.compute_networks_client = cls.os_primary.compute_networks_client
cls.limits_client = cls.os_primary.limits_client
cls.volumes_extensions_client =\
cls.os_primary.volumes_extensions_client
cls.snapshots_extensions_client =\
cls.os_primary.snapshots_extensions_client
cls.interfaces_client = cls.os_primary.interfaces_client
cls.fixed_ips_client = cls.os_primary.fixed_ips_client
cls.availability_zone_client = cls.os_primary.availability_zone_client
cls.agents_client = cls.os_primary.agents_client
cls.aggregates_client = cls.os_primary.aggregates_client
cls.services_client = cls.os_primary.services_client
cls.instance_usages_audit_log_client = (
cls.os_primary.instance_usages_audit_log_client)
cls.hypervisor_client = cls.os_primary.hypervisor_client
cls.certificates_client = cls.os_primary.certificates_client
cls.migrations_client = cls.os_primary.migrations_client
cls.security_group_default_rules_client = (
cls.os_primary.security_group_default_rules_client)
cls.versions_client = cls.os_primary.compute_versions_client
if CONF.service_available.cinder:
cls.volumes_client = cls.os_primary.volumes_client_latest
cls.attachments_client = cls.os_primary.attachments_client_latest
cls.snapshots_client = cls.os_primary.snapshots_client_latest
if CONF.service_available.glance:
if CONF.image_feature_enabled.api_v1:
cls.images_client = cls.os_primary.image_client
elif CONF.image_feature_enabled.api_v2:
cls.images_client = cls.os_primary.image_client_v2
else:
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
cls._check_depends_on_nova_network()
@classmethod
def _check_depends_on_nova_network(cls):
# Since nova-network APIs were removed from Nova in the Rocky release,
# determine, based on the max version from the version document, if
# the compute API is >Queens and if so, skip tests that rely on
# nova-network.
if not getattr(cls, 'depends_on_nova_network', False):
return
versions = cls.versions_client.list_versions()['versions']
# Find the v2.1 version which will tell us our max version for the
# compute API we're testing against.
for version in versions:
if version['id'] == 'v2.1':
max_version = api_version_request.APIVersionRequest(
version['version'])
break
else:
LOG.warning(
'Unable to determine max v2.1 compute API version: %s',
versions)
return
# The max compute API version in Queens is 2.60 so we cap
# at that version.
queens = api_version_request.APIVersionRequest('2.60')
if max_version > queens:
raise cls.skipException('nova-network is gone')
@classmethod
def resource_setup(cls):
super(BaseV2ComputeTest, cls).resource_setup()
cls.request_microversion = (
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
cls.volume_request_microversion = (
api_version_utils.select_request_microversion(
cls.volume_min_microversion,
CONF.volume.min_microversion))
cls.placement_request_microversion = (
api_version_utils.select_request_microversion(
cls.placement_min_microversion,
CONF.placement.min_microversion))
cls.build_interval = CONF.compute.build_interval
cls.build_timeout = CONF.compute.build_timeout
cls.image_ref = CONF.compute.image_ref
cls.image_ref_alt = CONF.compute.image_ref_alt
cls.flavor_ref = CONF.compute.flavor_ref
cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
cls.ssh_user = CONF.validation.image_ssh_user
cls.ssh_alt_user = CONF.validation.image_alt_ssh_user
cls.image_ssh_user = CONF.validation.image_ssh_user
cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user
cls.image_ssh_password = CONF.validation.image_ssh_password
cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password
@classmethod
def is_requested_microversion_compatible(cls, max_version):
"""Check the compatibility of selected request microversion
This method will check if selected request microversion
(cls.request_microversion) for test is compatible with respect
to 'max_version'. Compatible means if selected request microversion
is in the range(<=) of 'max_version'.
:param max_version: maximum microversion to compare for compatibility.
Example: '2.30'
:returns: True if selected request microversion is compatible with
'max_version'. False in other case.
"""
try:
req_version_obj = api_version_request.APIVersionRequest(
cls.request_microversion)
# NOTE(gmann): This is the case where this method is used before calling
# resource_setup(), where cls.request_microversion is set. There may
# not be any such case, but we can still handle it.
except AttributeError:
request_microversion = (
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
req_version_obj = api_version_request.APIVersionRequest(
request_microversion)
max_version_obj = api_version_request.APIVersionRequest(max_version)
return req_version_obj <= max_version_obj
@classmethod
def server_check_teardown(cls):
"""Checks is the shared server clean enough for subsequent test.
Method will delete the server when it's dirty.
The setUp method is responsible for creating a new server.
Exceptions raised in tearDown class are fails the test case,
This method supposed to use only by tearDown methods, when
the shared server_id is stored in the server_id of the class.
"""
if getattr(cls, 'server_id', None) is not None:
try:
waiters.wait_for_server_status(cls.servers_client,
cls.server_id, 'ACTIVE')
except Exception as exc:
LOG.exception(exc)
cls.servers_client.delete_server(cls.server_id)
waiters.wait_for_server_termination(cls.servers_client,
cls.server_id)
cls.server_id = None
raise
@classmethod
def create_test_server(cls, validatable=False, volume_backed=False,
validation_resources=None, clients=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
returns a test server. The purpose of this wrapper is to minimize
the impact on the code of the tests already using this
function.
:param validatable: Whether the server will be pingable or sshable.
:param volume_backed: Whether the instance is volume backed or not.
:param validation_resources: Dictionary of validation resources as
returned by `get_class_validation_resources`.
:param clients: Client manager, defaults to os_primary.
:param kwargs: Extra arguments are passed down to the
`compute.create_test_server` call.
"""
if 'name' not in kwargs:
kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server")
request_version = api_version_request.APIVersionRequest(
cls.request_microversion)
v2_37_version = api_version_request.APIVersionRequest('2.37')
tenant_network = cls.get_tenant_network()
# NOTE(snikitin): since microversion v2.37 'networks' field is required
if (request_version >= v2_37_version and 'networks' not in kwargs and
not tenant_network):
kwargs['networks'] = 'none'
if clients is None:
clients = cls.os_primary
body, servers = compute.create_test_server(
clients,
validatable,
validation_resources=validation_resources,
tenant_network=tenant_network,
volume_backed=volume_backed,
**kwargs)
# For each server schedule wait and delete, so we first delete all
# and then wait for all
for server in servers:
cls.addClassResourceCleanup(waiters.wait_for_server_termination,
clients.servers_client, server['id'])
for server in servers:
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
clients.servers_client.delete_server, server['id'])
return body
@classmethod
def create_security_group(cls, name=None, description=None):
if name is None:
name = data_utils.rand_name(cls.__name__ + "-securitygroup")
if description is None:
description = data_utils.rand_name('description')
body = cls.security_groups_client.create_security_group(
name=name, description=description)['security_group']
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
cls.security_groups_client.delete_security_group,
body['id'])
return body
@classmethod
def create_test_server_group(cls, name="", policy=None):
if not name:
name = data_utils.rand_name(cls.__name__ + "-Server-Group")
if policy is None:
policy = ['affinity']
body = cls.server_groups_client.create_server_group(
name=name, policies=policy)['server_group']
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
cls.server_groups_client.delete_server_group,
body['id'])
return body
def wait_for(self, condition):
"""Repeatedly calls condition() until a timeout."""
start_time = int(time.time())
while True:
try:
condition()
except Exception:
pass
else:
return
if int(time.time()) - start_time >= self.build_timeout:
condition()
return
time.sleep(self.build_interval)
@classmethod
def prepare_instance_network(cls):
if (CONF.validation.auth_method != 'disabled' and
CONF.validation.connect_method == 'floating'):
cls.set_network_resources(network=True, subnet=True, router=True,
dhcp=True)
@classmethod
def create_image_from_server(cls, server_id, **kwargs):
"""Wrapper utility that returns an image created from the server.
If compute microversion >= 2.36, the returned image response will
be from the image service API rather than the compute image proxy API.
"""
name = kwargs.pop('name',
data_utils.rand_name(cls.__name__ + "-image"))
wait_until = kwargs.pop('wait_until', None)
wait_for_server = kwargs.pop('wait_for_server', True)
image = cls.compute_images_client.create_image(server_id, name=name,
**kwargs)
if api_version_utils.compare_version_header_to_response(
"OpenStack-API-Version", "compute 2.45", image.response, "lt"):
image_id = image['image_id']
else:
image_id = data_utils.parse_image_id(image.response['location'])
# The compute image proxy APIs were deprecated in 2.35 so
# use the images client directly if the API microversion being
# used is >=2.36.
if not cls.is_requested_microversion_compatible('2.35'):
client = cls.images_client
else:
client = cls.compute_images_client
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_image, image_id)
if wait_until is not None:
try:
wait_until = wait_until.upper()
if not cls.is_requested_microversion_compatible('2.35'):
wait_until = wait_until.lower()
waiters.wait_for_image_status(client, image_id, wait_until)
except lib_exc.NotFound:
if wait_until.upper() == 'ACTIVE':
# If the image is not found after create_image returned
# that means the snapshot failed in nova-compute and nova
# deleted the image. There should be a compute fault
# recorded with the server in that case, so get the server
# and dump some details.
server = (
cls.servers_client.show_server(server_id)['server'])
if 'fault' in server:
raise exceptions.SnapshotNotFoundException(
server['fault'], image_id=image_id)
else:
raise exceptions.SnapshotNotFoundException(
image_id=image_id)
else:
raise
image = client.show_image(image_id)
# Compute image client returns response wrapped in 'image' element
# which is not the case with Glance image client.
if 'image' in image:
image = image['image']
if wait_until.upper() == 'ACTIVE':
if wait_for_server:
waiters.wait_for_server_status(cls.servers_client,
server_id, 'ACTIVE')
return image
@classmethod
def recreate_server(cls, server_id, validatable=False, **kwargs):
"""Destroy an existing class level server and creates a new one
Some test classes use a test server that can be used by multiple
tests. This is done to optimise runtime and test load.
If something goes wrong with the test server, it can be rebuilt
using this helper.
This helper can also be used for the initial provisioning if no
server_id is specified.
:param server_id: UUID of the server to be rebuilt. If None is
specified, a new server is provisioned.
:param validatable: whether the server needs to be
validatable. When True, validation resources are acquired via
the `get_class_validation_resources` helper.
:param kwargs: extra parameters are passed through to the
`create_test_server` call.
:return: the UUID of the created server.
"""
if server_id:
cls.delete_server(server_id)
cls.password = data_utils.rand_password()
server = cls.create_test_server(
validatable,
validation_resources=cls.get_class_validation_resources(
cls.os_primary),
wait_until='ACTIVE',
adminPass=cls.password,
**kwargs)
return server['id']
@classmethod
def delete_server(cls, server_id):
"""Deletes an existing server and waits for it to be gone."""
try:
cls.servers_client.delete_server(server_id)
waiters.wait_for_server_termination(cls.servers_client,
server_id)
except Exception:
LOG.exception('Failed to delete server %s', server_id)
def resize_server(self, server_id, new_flavor_id, **kwargs):
"""resize and confirm_resize an server, waits for it to be ACTIVE."""
self.servers_client.resize_server(server_id, new_flavor_id, **kwargs)
waiters.wait_for_server_status(self.servers_client, server_id,
'VERIFY_RESIZE')
self.servers_client.confirm_resize_server(server_id)
waiters.wait_for_server_status(
self.servers_client, server_id, 'ACTIVE')
server = self.servers_client.show_server(server_id)['server']
self.assert_flavor_equal(new_flavor_id, server['flavor'])
@classmethod
def delete_volume(cls, volume_id):
"""Deletes the given volume and waits for it to be gone."""
try:
cls.volumes_client.delete_volume(volume_id)
# TODO(mriedem): We should move the wait_for_resource_deletion
# into the delete_volume method as a convenience to the caller.
cls.volumes_client.wait_for_resource_deletion(volume_id)
except lib_exc.NotFound:
LOG.warning("Unable to delete volume '%s' since it was not found. "
"Maybe it was already deleted?", volume_id)
@classmethod
def get_server_ip(cls, server, validation_resources=None):
"""Get the server fixed or floating IP.
Based on the configuration we're in, return a correct ip
address for validating that a guest is up.
:param server: The server dict as returned by the API
:param validation_resources: The dict of validation resources
provisioned for the server.
"""
if CONF.validation.connect_method == 'floating':
if validation_resources:
return validation_resources['floating_ip']['ip']
else:
msg = ('When validation.connect_method equals floating, '
'validation_resources cannot be None')
raise lib_exc.InvalidParam(invalid_param=msg)
elif CONF.validation.connect_method == 'fixed':
addresses = server['addresses'][CONF.validation.network_for_ssh]
for address in addresses:
if address['version'] == CONF.validation.ip_version_for_ssh:
return address['addr']
raise exceptions.ServerUnreachable(server_id=server['id'])
else:
raise lib_exc.InvalidConfiguration()
def setUp(self):
super(BaseV2ComputeTest, self).setUp()
self.useFixture(api_microversion_fixture.APIMicroversionFixture(
compute_microversion=self.request_microversion,
volume_microversion=self.volume_request_microversion,
placement_microversion=self.placement_request_microversion))
@classmethod
def create_volume(cls, image_ref=None, **kwargs):
"""Create a volume and wait for it to become 'available'.
:param image_ref: Specify an image id to create a bootable volume.
:param kwargs: other parameters to create volume.
:returns: The available volume.
"""
if 'size' not in kwargs:
kwargs['size'] = CONF.volume.volume_size
if 'display_name' not in kwargs:
vol_name = data_utils.rand_name(cls.__name__ + '-volume')
kwargs['display_name'] = vol_name
if image_ref is not None:
kwargs['imageRef'] = image_ref
if CONF.compute.compute_volume_common_az:
kwargs.setdefault('availability_zone',
CONF.compute.compute_volume_common_az)
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.addClassResourceCleanup(
cls.volumes_client.wait_for_resource_deletion, volume['id'])
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
cls.volumes_client.delete_volume,
volume['id'])
waiters.wait_for_volume_resource_status(cls.volumes_client,
volume['id'], 'available')
return volume
def _detach_volume(self, server, volume):
"""Helper method to detach a volume.
Ignores 404 responses if the volume or server do not exist, or the
volume is already detached from the server.
"""
try:
volume = self.volumes_client.show_volume(volume['id'])['volume']
# Check the status. You can only detach an in-use volume, otherwise
# the compute API will return a 400 response.
if volume['status'] == 'in-use':
self.servers_client.detach_volume(server['id'], volume['id'])
except lib_exc.NotFound:
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
pass
def attach_volume(self, server, volume, device=None, tag=None):
"""Attaches volume to server and waits for 'in-use' volume status.
The volume will be detached when the test tears down.
:param server: The server to which the volume will be attached.
:param volume: The volume to attach.
:param device: Optional mountpoint for the attached volume. Note that
this is not guaranteed for all hypervisors and is not recommended.
:param tag: Optional device role tag to apply to the volume.
"""
attach_kwargs = dict(volumeId=volume['id'])
if device:
attach_kwargs['device'] = device
if tag:
attach_kwargs['tag'] = tag
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
# On teardown detach the volume and for multiattach volumes wait for
# the attachment to be removed. For non-multiattach volumes wait for
# the state of the volume to change to available. This is so we don't
# error out when trying to delete the volume during teardown.
if volume['multiattach']:
att = waiters.wait_for_volume_attachment_create(
self.volumes_client, volume['id'], server['id'])
self.addCleanup(waiters.wait_for_volume_attachment_remove,
self.volumes_client, volume['id'],
att['attachment_id'])
else:
self.addCleanup(waiters.wait_for_volume_resource_status,
self.volumes_client, volume['id'], 'available')
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
self.addCleanup(self._detach_volume, server, volume)
return attachment
def create_volume_snapshot(self, volume_id, name=None, description=None,
metadata=None, force=False):
name = name or data_utils.rand_name(
self.__class__.__name__ + '-snapshot')
snapshot = self.snapshots_client.create_snapshot(
volume_id=volume_id,
force=force,
display_name=name,
description=description,
metadata=metadata)['snapshot']
self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
snapshot['id'])
self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
waiters.wait_for_volume_resource_status(self.snapshots_client,
snapshot['id'], 'available')
snapshot = self.snapshots_client.show_snapshot(
snapshot['id'])['snapshot']
return snapshot
def assert_flavor_equal(self, flavor_id, server_flavor):
"""Check whether server_flavor equals to flavor.
:param flavor_id: flavor id
:param server_flavor: flavor info returned by show_server.
"""
# Nova API > 2.46 no longer includes flavor.id, and schema check
# will cover whether 'id' should be in flavor
if server_flavor.get('id'):
msg = ('server flavor is not same as flavor!')
self.assertEqual(flavor_id, server_flavor['id'], msg)
else:
flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
self.assertEqual(flavor['name'], server_flavor['original_name'],
"original_name in server flavor is not same as "
"flavor name!")
for key in ['ram', 'vcpus', 'disk']:
msg = ('attribute %s in server flavor is not same as '
'flavor!' % key)
self.assertEqual(flavor[key], server_flavor[key], msg)
class BaseV2ComputeAdminTest(BaseV2ComputeTest):
"""Base test case class for Compute Admin API tests."""
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(BaseV2ComputeAdminTest, cls).setup_clients()
cls.availability_zone_admin_client = (
cls.os_admin.availability_zone_client)
cls.admin_flavors_client = cls.os_admin.flavors_client
cls.admin_servers_client = cls.os_admin.servers_client
cls.image_client = cls.os_admin.image_client_v2
cls.admin_assisted_volume_snapshots_client = \
cls.os_admin.assisted_volume_snapshots_client
def create_flavor(self, ram, vcpus, disk, name=None,
is_public='True', **kwargs):
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-flavor")
id = kwargs.pop('id', data_utils.rand_int_id(start=1000))
client = self.admin_flavors_client
flavor = client.create_flavor(
ram=ram, vcpus=vcpus, disk=disk, name=name,
id=id, is_public=is_public, **kwargs)['flavor']
self.addCleanup(client.wait_for_resource_deletion, flavor['id'])
self.addCleanup(client.delete_flavor, flavor['id'])
return flavor
@classmethod
def get_host_for_server(cls, server_id):
server_details = cls.admin_servers_client.show_server(server_id)
return server_details['server']['OS-EXT-SRV-ATTR:host']
def get_host_other_than(self, server_id):
source_host = self.get_host_for_server(server_id)
svcs = self.os_admin.services_client.list_services(
binary='nova-compute')['services']
hosts = []
for svc in svcs:
if svc['state'] == 'up' and svc['status'] == 'enabled':
if CONF.compute.compute_volume_common_az:
if svc['zone'] == CONF.compute.compute_volume_common_az:
hosts.append(svc['host'])
else:
hosts.append(svc['host'])
for target_host in hosts:
if source_host != target_host:
return target_host
| 45.637703 | 79 | 0.639635 | ["Apache-2.0"] | AurelienLourot/tempest | tempest/api/compute/base.py | 30,988 | Python |
"""Script that generates a refresh token for a specific user."""
import os
import sys
import spotipy.util as util
import json
if len(sys.argv) == 2:
username = str(sys.argv[1])
else:
print('Usage: {} username'.format(sys.argv[0]))
sys.exit(1)
scope = 'user-read-currently-playing user-read-playback-state'
# Get tokens from Spotify.
try:
util.prompt_for_user_token(username, scope)
except Exception:
raise RuntimeError('Could not fetch token.')
# Print refresh token.
with open('.cache-{}'.format(username)) as json_file:
data = json.load(json_file)
print('Refresh token for {}: {}'.format(username, data['refresh_token']))
| 24.923077 | 77 | 0.699074 | ["MIT"] | Georgej5/Colorfy | spotify_setup.py | 648 | Python |
from sdklib.http import HttpSdk
class SampleHttpsHttpSdk(HttpSdk):
DEFAULT_HOST = "https://www.google.com"
API_IVANPRJCTS_PATH = "/ivanprjcts"
def get_ivanprjcts(self):
return self.get(self.API_IVANPRJCTS_PATH)
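# Illustrative usage (a sketch assuming sdklib's default HttpSdk constructor and
# that DEFAULT_HOST is reachable; not part of the original sample):
#   sdk = SampleHttpsHttpSdk()
#   response = sdk.get_ivanprjcts()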
| 19.666667 | 49 | 0.728814 | ["BSD-2-Clause"] | ivanprjcts/sdklib | tests/sample_sdk_https.py | 236 | Python |
from django.forms import ModelForm
from .models import Post, Comment
from loginsignup.utils import getBeaverInstance
class PostForm(ModelForm):
class Meta:
model = Post
exclude = ["likes", "posted_on", "post_creator"]
def checkPost(self, request):
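# Validate the submitted data; on success, attach the requesting user's
# Beaver profile as the post creator before saving, and report the outcome.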
if self.is_valid():
post = self.save(commit=False)
beaver = getBeaverInstance(request)
post.post_creator = beaver
post.save()
return True
return False
class CommentForm(ModelForm):
class Meta:
model = Comment
fields = ["comment"]
def checkComment(self, request, post):
if self.is_valid():
comment = self.save(commit=False)
comment.comment_creator = getBeaverInstance(request)
comment.post = post
comment.save()
return True
return False
| 26.176471 | 64 | 0.602247 | ["MIT"] | BastaAditya/Quiver | Quiver/posts/forms.py | 890 | Python |
'''
Equations are given in the format A / B = k, where A and B are variables represented as strings, and k is a real number (floating point number). Given some queries, return the answers. If the answer does not exist, return -1.0.
Example:
Given a / b = 2.0, b / c = 3.0.
queries are: a / c = ?, b / a = ?, a / e = ?, a / a = ?, x / x = ? .
return [6.0, 0.5, -1.0, 1.0, -1.0 ].
The input is: vector<pair<string, string>> equations, vector<double>& values, vector<pair<string, string>> queries , where equations.size() == values.size(), and the values are positive. This represents the equations. Return vector<double>.
According to the example above:
equations = [ ["a", "b"], ["b", "c"] ],
values = [2.0, 3.0],
queries = [ ["a", "c"], ["b", "a"], ["a", "e"], ["a", "a"], ["x", "x"] ].
The input is always valid. You may assume that evaluating the queries will result in no division by zero and there is no contradiction.
'''
class Solution(object):
def buildGraph(self, edges, vals):
"""
:type edge: List[[str, str]]
:type vals: List[Double]
:rtype: dict[dict]
"""
import collections
graph = collections.defaultdict(dict)
for index, val in enumerate(vals):
start = edges[index][0]
end = edges[index][1]
graph[start][end] = val
graph[end][start] = 1 / val
return graph
def insert(self, start, end, val):
self.graph[start][end] = val
self.graph[end][start] = 1 / val
def search(self, start, end):
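# Graph traversal from `start`, accumulating the product of edge weights
# (division results) along visited paths; returns -1.0 if `end` is unreachable.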
val = 1.0
visited = dict()
size = len(self.graph)
mark = set()
mark.add(start)
visited[start] = 1.0
while (len(mark) > 0) and (end not in visited):
src = mark.pop()
for (dest, val) in self.graph[src].items():
if dest not in visited:
mark.add(dest)
visited[dest] = visited[src] * val
return visited.get(end, -1.0)
def calcEquation(self, equations, values, queries):
"""
:type equations: List[List[str]]
:type values: List[float]
:type queries: List[List[str]]
:rtype: List[float]
"""
self.graph = self.buildGraph(equations, values)
output = list()
for (start, end) in queries:
if start not in self.graph or end not in self.graph:
output.append(-1.0)
continue
val = self.search(start, end)
if val > 0:
output.append(val)
self.insert(start, end, val)
else:
output.append(-1.0)
return output
solution = Solution()
equations = [["x1","x2"],["x2","x3"],["x3","x4"],["x4","x5"]]
values = [3.0,4.0,5.0,6.0]
queries = [["x1","x5"],["x5","x2"],["x2","x4"],["x2","x2"],["x2","x9"],["x9","x9"]]
print(solution.calcEquation(equations, values, queries))
| 35.566265 | 240 | 0.542005 | ["BSD-3-Clause"] | shub0/algorithm-data-structure | python/evaluate_division.py | 2,952 | Python |
# -*- coding: utf-8 -*-
import hmac
import requests
from json import dumps
from hashlib import sha1
from .app import api, env
def match_any_if_any(event, events):
return events is None or event in events
class Subscription:
def __init__(self, data):
self.data = data
self.events = data['data'].get('events') # user defined
def __getitem__(self, config):
return self.data[config]
class Subscriptions:
store = {}
@classmethod
def add(cls, sub):
Subscriptions.store[sub['id']] = Subscription(sub)
@classmethod
def is_listening_for(cls, event):
for id, sub in Subscriptions.store.items():
if match_any_if_any(event, sub.events):
return True
return False
@classmethod
def publish(cls, eventid, event, data):
for id, sub in Subscriptions.store.items():
if match_any_if_any(event, sub.events):
requests.post(
sub['endpoint'],
headers={'Content-Type': 'application/json'},
data=dumps(dict(
eventType=event,
cloudEventsVersion='0.1',
contentType='application/vnd.omg.object+json',
eventID=eventid,
data=data
))
)
@classmethod
def remove(cls, eventid):
Subscriptions.store.pop(eventid, None)
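# For reference, the subscribe payload these handlers expect looks roughly like
# the following (the id and endpoint values are hypothetical):
#
#   {"id": "sub-1",
#    "endpoint": "http://listener.example/events",
#    "data": {"events": ["push", "pull_request"]}}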
@api.route('/webhooks/subscribe')
async def subscribe(req, resp):
data = await req.media()
Subscriptions.add(data)
resp.text = 'Subscribed'
@api.route('/webhooks/unsubscribe')
async def unsubscribe(req, resp):
data = await req.media()
Subscriptions.remove(data['id'])
resp.text = 'Unsubscribed'
@api.route('/webhooks')
async def webhooks(req, resp):
"""
Handle incoming GitHub webhooks
"""
data = await req.media()
eventid = req.headers.get('X-GitHub-Delivery')
event = req.headers.get('X-GitHub-Event')
if not Subscriptions.is_listening_for(event):
resp.text = f'Accepted, but not listening for {event} events.'
return
if env.webhook_secret:
signature = req.headers.get('X-Hub-Signature')
assert signature, 'X-Hub-Signature not found in the header.'
sha_name, signature = signature.split('=')
assert sha_name == 'sha1'
mac = hmac.new(env.webhook_secret, msg=data, digestmod='sha1')
assert str(mac.hexdigest()) == str(signature)
Subscriptions.publish(eventid, event, {'event': event, 'payload': data})
resp.text = 'Accepted'
| 26.78 | 76 | 0.590739 | [
"MIT"
] | adnrs96/github | app/webhooks.py | 2,678 | Python |
import json
import requests
from cklib.args import ArgumentParser
from cklib.logging import log
from cklib.jwt import encode_jwt_to_headers
from cklib.graph import Graph, GraphExportIterator
def send_to_ckcore(graph: Graph):
if not ArgumentParser.args.ckcore_uri:
return
log.info("ckcore Event Handler called")
base_uri = ArgumentParser.args.ckcore_uri.strip("/")
ckcore_graph = ArgumentParser.args.ckcore_graph
dump_json = ArgumentParser.args.debug_dump_json
create_graph(base_uri, ckcore_graph)
update_model(graph, base_uri, dump_json=dump_json)
send_graph(graph, base_uri, ckcore_graph, dump_json=dump_json)
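# Rough call-sequence sketch (assumes ArgumentParser.args has already been
# populated elsewhere, e.g. via add_args() plus an argument-parsing step):
#
#   send_to_ckcore(graph)
#     -> POST  {ckcore_uri}/graph/{ckcore_graph}         (create_graph)
#     -> PATCH {ckcore_uri}/model                        (update_model)
#     -> POST  {ckcore_uri}/graph/{ckcore_graph}/merge   (send_graph, NDJSON)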
def create_graph(ckcore_base_uri: str, ckcore_graph: str):
graph_uri = f"{ckcore_base_uri}/graph/{ckcore_graph}"
log.debug(f"Creating graph {ckcore_graph} via {graph_uri}")
headers = {"accept": "application/json"}
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
r = requests.post(graph_uri, data="", headers=headers)
if r.status_code != 200:
log.error(r.content)
raise RuntimeError(f"Failed to create graph: {r.content}")
def update_model(graph: Graph, ckcore_base_uri: str, dump_json: bool = False):
model_uri = f"{ckcore_base_uri}/model"
log.debug(f"Updating model via {model_uri}")
model_json = json.dumps(graph.export_model(), indent=4)
if dump_json:
with open("model.dump.json", "w") as model_outfile:
model_outfile.write(model_json)
headers = {}
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
r = requests.patch(model_uri, data=model_json, headers=headers)
if r.status_code != 200:
log.error(r.content)
raise RuntimeError(f"Failed to create model: {r.content}")
def send_graph(
graph: Graph, ckcore_base_uri: str, ckcore_graph: str, dump_json: bool = False
):
merge_uri = f"{ckcore_base_uri}/graph/{ckcore_graph}/merge"
log.debug(f"Sending graph via {merge_uri}")
graph_outfile = None
if dump_json:
graph_outfile = open("graph.dump.json", "w")
try:
graph_export_iterator = GraphExportIterator(graph, graph_outfile)
headers = {
"Content-Type": "application/x-ndjson",
"Cloudkeeper-Ckworker-Nodes": str(graph.number_of_nodes()),
"Cloudkeeper-Ckworker-Edges": str(graph.number_of_edges()),
}
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
r = requests.post(
merge_uri,
data=graph_export_iterator,
headers=headers,
)
if r.status_code != 200:
log.error(r.content)
raise RuntimeError(f"Failed to send graph: {r.content}")
log.debug(f"ckcore reply: {r.content.decode()}")
log.debug(
f"Sent {graph_export_iterator.nodes_sent} nodes and"
f" {graph_export_iterator.edges_sent} edges to ckcore"
)
finally:
if graph_outfile is not None:
graph_outfile.close()
def add_args(arg_parser: ArgumentParser) -> None:
arg_parser.add_argument(
"--ckcore-uri",
help="ckcore URI (default: http://localhost:8900)",
default="http://localhost:8900",
dest="ckcore_uri",
)
arg_parser.add_argument(
"--ckcore-ws-uri",
help="ckcore Websocket URI (default: ws://localhost:8900)",
default="ws://localhost:8900",
dest="ckcore_ws_uri",
)
arg_parser.add_argument(
"--ckcore-graph",
help="ckcore graph name (default: ck)",
default="ck",
dest="ckcore_graph",
)
arg_parser.add_argument(
"--debug-dump-json",
help="Dump the generated json data (default: False)",
dest="debug_dump_json",
action="store_true",
)
| 32.096774 | 82 | 0.654271 | [
"Apache-2.0"
] | mesosphere/cloudkeeper | ckworker/ckworker/ckcore.py | 3,980 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack import resource
class Queue(resource.Resource):
# FIXME(anyone): The name string of `location` field of Zaqar API response
# is lower case. That is inconsistent with the guide from API-WG. This is
# a workaround for this issue.
location = resource.Header("location")
resources_key = "queues"
base_path = "/queues"
# capabilities
allow_create = True
allow_list = True
allow_fetch = True
allow_delete = True
# Properties
#: The default TTL of messages defined for a queue, which will effect for
#: any messages posted to the queue.
default_message_ttl = resource.Body("_default_message_ttl")
#: Description of the queue.
description = resource.Body("description")
#: The max post size of messages defined for a queue, which will effect
#: for any messages posted to the queue.
max_messages_post_size = resource.Body("_max_messages_post_size")
#: Name of the queue. The name is the unique identity of a queue. It
#: must not exceed 64 bytes in length, and it is limited to US-ASCII
#: letters, digits, underscores, and hyphens.
name = resource.Body("name", alternate_id=True)
#: The ID to identify the client accessing Zaqar API. Must be specified
#: in header for each API request.
client_id = resource.Header("Client-ID")
#: The ID to identify the project accessing Zaqar API. Must be specified
#: in case keystone auth is not enabled in Zaqar service.
project_id = resource.Header("X-PROJECT-ID")
def create(self, session, prepend_key=True):
request = self._prepare_request(requires_id=True,
prepend_key=prepend_key)
headers = {
"Client-ID": self.client_id or str(uuid.uuid4()),
"X-PROJECT-ID": self.project_id or session.get_project_id()
}
request.headers.update(headers)
response = session.put(request.url,
json=request.body, headers=request.headers)
self._translate_response(response, has_body=False)
return self
@classmethod
def list(cls, session, paginated=False, **params):
"""This method is a generator which yields queue objects.
        This is almost a copy of the list method of the resource.Resource
        class. The only difference is that the request header now includes the
        `Client-ID` and `X-PROJECT-ID` fields, which are required by the Zaqar
        v2 API.
"""
more_data = True
query_params = cls._query_mapping._transpose(params)
uri = cls.base_path % params
headers = {
"Client-ID": params.get('client_id', None) or str(uuid.uuid4()),
"X-PROJECT-ID": params.get('project_id', None
) or session.get_project_id()
}
while more_data:
resp = session.get(uri,
headers=headers, params=query_params)
resp = resp.json()
resp = resp[cls.resources_key]
if not resp:
more_data = False
yielded = 0
new_marker = None
for data in resp:
value = cls.existing(**data)
new_marker = value.id
yielded += 1
yield value
if not paginated:
return
if "limit" in query_params and yielded < query_params["limit"]:
return
query_params["limit"] = yielded
query_params["marker"] = new_marker
def fetch(self, session, requires_id=True, error_message=None):
request = self._prepare_request(requires_id=requires_id)
headers = {
"Client-ID": self.client_id or str(uuid.uuid4()),
"X-PROJECT-ID": self.project_id or session.get_project_id()
}
request.headers.update(headers)
response = session.get(request.url,
headers=headers)
self._translate_response(response)
return self
def delete(self, session):
request = self._prepare_request()
headers = {
"Client-ID": self.client_id or str(uuid.uuid4()),
"X-PROJECT-ID": self.project_id or session.get_project_id()
}
request.headers.update(headers)
response = session.delete(request.url,
headers=headers)
self._translate_response(response, has_body=False)
return self
| 38.240602 | 78 | 0.624853 | [
"Apache-2.0"
] | TeutoNet/openstacksdk | openstack/message/v2/queue.py | 5,086 | Python |
from dataclasses import dataclass, field
__NAMESPACE__ = "NISTSchema-SV-IV-union-short-gYear-pattern-3-NS"
@dataclass
class NistschemaSvIvUnionShortGYearPattern3:
class Meta:
name = "NISTSchema-SV-IV-union-short-gYear-pattern-3"
namespace = "NISTSchema-SV-IV-union-short-gYear-pattern-3-NS"
value: str = field(
default="",
metadata={
"pattern": r"\d\d50",
}
)
| 23.777778 | 69 | 0.64486 | [
"MIT"
] | tefra/xsdata-w3c-tests | output/models/nist_data/union/short_g_year/schema_instance/nistschema_sv_iv_union_short_g_year_pattern_3_xsd/nistschema_sv_iv_union_short_g_year_pattern_3.py | 428 | Python |
''' setup module
'''
from distutils.core import setup
# TEMPLATE
setup(
name='mask-query-aide',
version='0.0',
description='python code to train ML for detecting people with masks',
long_description=open('README.rst').read(),
author='Christine Madden',
license=open('LICENSE').read(),
author_email='[email protected]',
packages=['mask_query_aide'],
# python_requires="<3.8",
install_requires=[
"numpy==1.16.1",
"pandas",
"matplotlib",
"opencv-python<=4.1.2.30",
"keras==2.2.4",
"tensorflow<2.0",
"tensorflow-gpu<2.0",
"imageai",
"jupyterlab",
"requests",
],
entry_points={
'console_scripts':
[
'mask_query_aide = mask_query_aide.__main__:main',
]
}
)
| 23.166667 | 74 | 0.570743 | [
"MIT"
] | usckiki82/mask-query-aide | setup.py | 834 | Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
__all__ = ["Config"]
class Config(object):
"""
Config: Holds configuration settings.
Parameters
----------
fitParameters : list
Parameters to fit.
parameterPriors : dict
Dictionary with parameters as keys, and a dictionary
as the value for each key. This dictionary is called
        to set up the pymc3 priors for each parameter not in
fitParameters.
columnMapping : dict
This dictionary should define the
column names of the user's data relative to the
internally used names.
tableParameterLimits : dict
        This dictionary is called
when building model tables to set the grid in subsolar
temperature and phase angle. It should have 'T_ss' and
'alpha' as keys. Values should be a list:
the first element should be another list
with the lower and upper bounds, the second element
should be the step size.
threads : int
        The number of threads to use when building model tables
        and running the multi-fit script.
    samples : int
        Number of samples to draw from the posterior distribution.
    burnInSamples : int
        Number of drawn samples to discard from summary statistics
        and plotting.
    chains : int
        Number of Markov chains used to sample the posterior distribution.
phaseAngleFluxCorrection : float
The default value to correct for phase-angle effects in the
Standard Thermal Model. The canonical value is 0.01.
verbose : bool
Print progress statements?
"""
fitParameters = ["logT1", "logD", "eps"]
parameterPriors = {
"logD": {
"lower": 1,
"upper": 8,
},
"eps": {
"lower": 0.0,
"upper": 1.0},
"logT1": {
"lower": 0.01,
"upper": 5,
},
"T_ss": {
"lower": 10,
"upper": 1200.0
},
"alpha_rad": {
"lower": 0,
"upper": np.pi
},
"r_au": {
"lower": 0,
"upper": 10
},
"delta_au": {
"lower": 0,
"upper": 10
},
"G": {
"lower": 0,
"upper": 1},
"p": {
"lower": 0,
"upper": 5
},
"eta": {
"lower": 0,
"upper": 10
}
}
columnMapping = {
"designation" : "designation",
"obs_id": "obs_id",
"exp_mjd": "mjd",
"r_au": "r_au",
"delta_au": "delta_au",
"alpha_rad": "alpha_rad",
"G": "G",
"logD": "logD",
"logT1" : "logT1",
"eta": "eta",
"eps": "eps",
"flux_si": ["flux_W1_si", "flux_W2_si", "flux_W3_si", "flux_W4_si"],
"fluxErr_si": ["fluxErr_W1_si", "fluxErr_W2_si", "fluxErr_W3_si", "fluxErr_W4_si"],
"mag" : ["mag_W1", "mag_W2", "mag_W3", "mag_W4"],
"magErr" : ["magErr_W1", "magErr_W2", "magErr_W3", "magErr_W4"]
}
tableParameterLimits = {
"T_ss": [[100.0, 1200.0], 0.5],
"alpha": [[0.0, np.pi], np.pi/360]
}
threads = 10
samples = 2500
burnInSamples = 500
chains = 20
phaseAngleFluxCorrection = 0.01
verbose = True
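# A minimal sketch of how these defaults might be overridden; "MyConfig" and the
# values shown are hypothetical, not part of the package:
#
#   class MyConfig(Config):
#       fitParameters = ["logT1", "logD", "eps", "eta"]
#       threads = 4
#       samples = 5000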
| 28.180328 | 91 | 0.513962 | [
"BSD-3-Clause"
] | moeyensj/atm | atm/config.py | 3,438 | Python |
from attr import dataclass
#s4 teng https://t.me/shuraim1/https:/
#S5 teng https://t.me/alquran30juzsaadalghamidi/5
#s6 teng https://t.me/bandar_abdulaziz_balilah/5
#s7 teng https://t.me/Idriss_Akbar/388
#s8 teng https://t.me/yasseraldosari_mp3/2
sura = {
'0': {'s1':'42', 's2':'257', 's3':'18', 's4':'3', 's5':'5', 's6':'5', 's7':'388', 's8':'2',},
'1': {'s1':'42', 's2':'257', 's3':'18', 's4':'3', 's5':'5', 's6':'5', 's7':'388', 's8':'2',},
'2': {'s1':'43', 's2':'258', 's3':'19', 's4':'4', 's5':'6', 's6':'6', 's7':'389', 's8':'3',},
'3': {'s1':'44', 's2':'259', 's3':'20', 's4':'5', 's5':'7', 's6':'7', 's7':'390', 's8':'4',},
'4': {'s1':'45', 's2':'260', 's3':'21', 's4':'6', 's5':'8', 's6':'8', 's7':'391', 's8':'5',},
'5': {'s1':'46', 's2':'261', 's3':'22', 's4':'7', 's5':'9', 's6':'9', 's7':'392', 's8':'6',},
'6': {'s1':'47', 's2':'262', 's3':'23', 's4':'8', 's5':'10', 's6':'10', 's7':'393', 's8':'7',},
'7': {'s1':'48', 's2':'263', 's3':'24', 's4':'9', 's5':'11', 's6':'11', 's7':'394', 's8':'8',},
'8': {'s1':'49', 's2':'264', 's3':'25', 's4':'10', 's5':'12', 's6':'12', 's7':'395', 's8':'9',},
'9': {'s1':'50', 's2':'265', 's3':'26', 's4':'11', 's5':'13', 's6':'13', 's7':'396', 's8':'10',},
'10': {'s1':'51', 's2':'266', 's3':'27', 's4':'12', 's5':'14', 's6':'14', 's7':'397', 's8':'11',},
'11': {'s1': '52', 's2':'267', 's3':'28', 's4':'13', 's5':'15', 's6':'15', 's7':'398', 's8':'12',},
'12': {'s1':'53', 's2':'268', 's3':'29', 's4':'14', 's5':'16', 's6':'16', 's7':'399', 's8':'13',},
'13': {'s1': '54', 's2':'269', 's3':'30', 's4':'15', 's5':'17', 's6':'17', 's7':'401', 's8':'14',},
'14': {'s1':'55', 's2':'270', 's3':'31', 's4':'16', 's5':'18', 's6':'18', 's7':'402', 's8':'15',},
'15': {'s1':'56', 's2':'271', 's3':'32', 's4':'17', 's5':'19', 's6':'19', 's7':'403', 's8':'16',},
'16': {'s1':'59', 's2':'272', 's3':'33', 's4':'18', 's5':'20', 's6':'20', 's7':'404', 's8':'17',},
'17': {'s1':'60', 's2':'273', 's3':'34', 's4':'19', 's5':'21', 's6':'21', 's7':'405', 's8':'18',},
'18' : {'s1':'61', 's2':'274', 's3':'35', 's4':'20', 's5':'22', 's6':'22', 's7':'406', 's8':'19',},
'19': {'s1':'62', 's2':'275', 's3':'36', 's4':'21', 's5':'23', 's6':'23', 's7':'407', 's8':'20',},
'20': {'s1':'63', 's2':'276', 's3':'37', 's4':'22', 's5':'24', 's6':'24', 's7':'408', 's8':'21',},
'21': {'s1':'64', 's2':'277', 's3':'38', 's4':'23', 's5':'25', 's6':'25', 's7':'409', 's8':'22',},
'22': {'s1':'65', 's2':'278', 's3':'39', 's4':'24', 's5':'26', 's6':'26', 's7':'410', 's8':'23',},
'23': {'s1':'66', 's2':'279', 's3':'40', 's4':'25', 's5':'27', 's6':'27', 's7':'411', 's8':'24',},
'24': {'s1':'67', 's2':'280', 's3':'41', 's4':'26', 's5':'28', 's6':'28', 's7':'412', 's8':'25',},
'25': {'s1':'68', 's2':'281', 's3':'42', 's4':'27', 's5':'29', 's6':'29', 's7':'413', 's8':'26',},
'26': {'s1':'69', 's2':'282', 's3':'43', 's4':'28', 's5':'30', 's6':'30', 's7':'414', 's8':'27',},
'27': {'s1':'70', 's2':'283', 's3':'44', 's4':'29', 's5':'31', 's6':'31', 's7':'415', 's8':'28',},
'28': {'s1':'71', 's2':'284', 's3':'45', 's4':'30', 's5':'32', 's6':'32', 's7':'416', 's8':'29',},
'29': {'s1':'72', 's2':'285', 's3':'46', 's4':'31', 's5':'33', 's6':'33', 's7':'417', 's8':'30',},
'30': {'s1':'73', 's2':'286', 's3':'47', 's4':'32', 's5':'34', 's6':'34', 's7':'418', 's8':'31',},
'31': {'s1':'74', 's2':'287', 's3':'48', 's4':'33', 's5':'35', 's6':'35', 's7':'419', 's8':'32',},
'32': {'s1':'75', 's2':'288', 's3':'49', 's4':'34', 's5':'36', 's6':'36', 's7':'420', 's8':'33',},
'33': {'s1':'76', 's2':'289', 's3':'50', 's4':'35', 's5':'37', 's6':'37', 's7':'421', 's8':'34',},
'34': {'s1':'77', 's2':'290', 's3':'51', 's4':'36', 's5':'38', 's6':'38', 's7':'422', 's8':'35',},
'35': {'s1':'78', 's2':'291', 's3':'52', 's4':'37', 's5':'39', 's6':'39', 's7':'423', 's8':'36',},
'36': {'s1':'79', 's2':'292', 's3':'53', 's4':'38', 's5':'40', 's6':'40', 's7':'424', 's8':'37',},
'37': {'s1':'80', 's2':'293', 's3':'54', 's4':'39', 's5':'41', 's6':'41', 's7':'425', 's8':'38',},
'38': {'s1':'81', 's2':'294', 's3':'55', 's4':'40', 's5':'42', 's6':'42', 's7':'426', 's8':'39',},
'39': {'s1':'82', 's2':'295', 's3':'56', 's4':'41', 's5':'43', 's6':'43', 's7':'427', 's8':'40',},
'40': {'s1':'83', 's2':'296', 's3':'57', 's4':'42', 's5':'44', 's6':'44', 's7':'428', 's8':'41',},
'41': {'s1':'84', 's2':'297', 's3':'58', 's4':'43', 's5':'45', 's6':'45', 's7':'429', 's8':'42',},
'42': {'s1':'85', 's2':'298', 's3':'59', 's4':'44', 's5':'46', 's6':'46', 's7':'430', 's8':'43',},
'43': {'s1':'86', 's2':'299', 's3':'60', 's4':'45', 's5':'47', 's6':'47', 's7':'431', 's8':'44',},
'44': {'s1':'87', 's2':'300', 's3':'61', 's4':'46', 's5':'48', 's6':'48', 's7':'432', 's8':'45',},
'45': {'s1':'88', 's2':'301', 's3':'62', 's4':'47', 's5':'49', 's6':'49', 's7':'433', 's8':'46',},
'46': {'s1':'89', 's2':'302', 's3':'63', 's4':'48', 's5':'50', 's6':'50', 's7':'434', 's8':'47',},
'47': {'s1':'90', 's2':'303', 's3':'64', 's4':'49', 's5':'51', 's6':'51', 's7':'435', 's8':'48',},
'48': {'s1':'91', 's2':'304', 's3':'65', 's4':'50', 's5':'52', 's6':'52', 's7':'436', 's8':'49',},
'49': {'s1':'92', 's2':'305', 's3':'66', 's4':'51', 's5':'53', 's6':'53', 's7':'437', 's8':'50',},
'50': {'s1':'93', 's2':'306', 's3':'67', 's4':'52', 's5':'54', 's6':'54', 's7':'438', 's8':'51',},
'51': {'s1':'94', 's2':'307', 's3':'68', 's4':'53', 's5':'55', 's6':'55', 's7':'439', 's8':'52',},
'52': {'s1':'95', 's2':'308', 's3':'69', 's4':'54', 's5':'56', 's6':'56', 's7':'440', 's8':'53',},
'53': {'s1':'96', 's2':'309', 's3':'70', 's4':'55', 's5':'57', 's6':'57', 's7':'441', 's8':'54',},
'54': {'s1':'97', 's2':'310', 's3':'71', 's4':'56', 's5':'58', 's6':'58', 's7':'442', 's8':'55',},
'55': {'s1':'98', 's2':'311', 's3':'72', 's4':'57', 's5':'59', 's6':'59', 's7':'443', 's8':'56',},
'56': {'s1':'99', 's2':'312', 's3':'73', 's4':'58', 's5':'60', 's6':'60', 's7':'444', 's8':'57',},
'57': {'s1':'100', 's2':'313', 's3':'74', 's4':'59', 's5':'61', 's6':'61', 's7':'445', 's8':'58',},
'58': {'s1':'101', 's2':'314', 's3':'75', 's4':'60', 's5':'62', 's6':'62', 's7':'446', 's8':'59',},
'59': {'s1':'102', 's2':'315', 's3':'76', 's4':'61', 's5':'63', 's6':'63', 's7':'447', 's8':'60',},
'60': {'s1':'103', 's2':'316', 's3':'77', 's4':'62', 's5':'64', 's6':'64', 's7':'448', 's8':'61',},
#61 inlinekeyboard starts in here
'61': {'s1':'104', 's2':'317', 's3':'78', 's4':'63', 's5':'65', 's6':'65', 's7':'449', 's8':'62',},
'62': {'s1':'105', 's2':'318', 's3':'79', 's4':'64', 's5':'66', 's6':'66', 's7':'450', 's8':'63',},
'63': {'s1':'106', 's2':'319', 's3':'80', 's4':'65', 's5':'67', 's6':'67', 's7':'451', 's8':'64',},
'64': {'s1':'107', 's2':'320', 's3':'81', 's4':'66', 's5':'68', 's6':'68', 's7':'452', 's8':'65',},
'65': {'s1':'108', 's2':'321', 's3':'82', 's4':'67', 's5':'69', 's6':'69', 's7':'453', 's8':'66',},
'66': {'s1':'109', 's2':'322', 's3':'83', 's4':'68', 's5':'70', 's6':'70', 's7':'454', 's8':'67',},
'67': {'s1':'110', 's2':'323', 's3':'84', 's4':'69', 's5':'71', 's6':'72', 's7':'455', 's8':'68',},
'68': {'s1':'111', 's2':'324', 's3':'85', 's4':'70', 's5':'72', 's6':'73', 's7':'456', 's8':'69',},
'69': {'s1':'112', 's2':'325', 's3':'86', 's4':'71', 's5':'73', 's6':'74', 's7':'457', 's8':'70',},
'70': {'s1':'113', 's2':'326', 's3':'87', 's4':'72', 's5':'74', 's6':'75', 's7':'458', 's8':'71',},
'71': {'s1':'114', 's2':'327', 's3':'88', 's4':'73', 's5':'75', 's6':'76', 's7':'459', 's8':'72',},
'72': {'s1':'115', 's2':'328', 's3':'89', 's4':'74', 's5':'76', 's6':'77', 's7':'460', 's8':'73',},
'73': {'s1':'116', 's2':'329', 's3':'90', 's4':'75', 's5':'77', 's6':'78', 's7':'461', 's8':'74',},
'74': {'s1':'117', 's2':'330', 's3':'91', 's4':'76', 's5':'78', 's6':'79', 's7':'462', 's8':'75',},
'75': {'s1':'118', 's2':'331', 's3':'92', 's4':'77', 's5':'79', 's6':'80', 's7':'463', 's8':'76',},
'76': {'s1':'119', 's2':'332', 's3':'93', 's4':'78', 's5':'80', 's6':'81', 's7':'464', 's8':'77',},
'77': {'s1':'120', 's2':'333', 's3':'94', 's4':'79', 's5':'81', 's6':'82', 's7':'465', 's8':'78',},
'78': {'s1':'121', 's2':'334', 's3':'95', 's4':'80', 's5':'82', 's6':'83', 's7':'466', 's8':'79',},
'79': {'s1':'122', 's2':'335', 's3':'96', 's4':'81', 's5':'83', 's6':'84', 's7':'467', 's8':'80',},
'80': {'s1':'123', 's2':'336', 's3':'97', 's4':'82', 's5':'84', 's6':'85', 's7':'468', 's8':'81',},
'81': {'s1':'124', 's2':'337', 's3':'98', 's4':'83', 's5':'85', 's6':'86', 's7':'469', 's8':'82',},
'82': {'s1':'125', 's2':'338', 's3':'99', 's4':'84', 's5':'86', 's6':'87', 's7':'470', 's8':'83',},
'83': {'s1':'126', 's2':'339', 's3':'100', 's4':'85', 's5':'87', 's6':'88', 's7':'471', 's8':'84',},
'84': {'s1':'127', 's2':'340', 's3':'101', 's4':'86', 's5':'88', 's6':'89', 's7':'472', 's8':'85',},
'85': {'s1':'128', 's2':'341', 's3':'102', 's4':'87', 's5':'89', 's6':'90', 's7':'473', 's8':'86',},
'86': {'s1':'129', 's2':'342', 's3':'103', 's4':'88', 's5':'90', 's6':'91', 's7':'474', 's8':'87',},
'87': {'s1':'130', 's2':'343', 's3':'104', 's4':'89', 's5':'91', 's6':'92', 's7':'475', 's8':'88',},
'88': {'s1':'131', 's2':'344', 's3':'105', 's4':'90', 's5':'92', 's6':'93', 's7':'476', 's8':'89',},
'89': {'s1':'132', 's2':'345', 's3':'106', 's4':'91', 's5':'93', 's6':'94', 's7':'477', 's8':'90',},
'90': {'s1':'133', 's2':'346', 's3':'107', 's4':'92', 's5':'94', 's6':'95', 's7':'478', 's8':'91',},
'91': {'s1':'134', 's2':'347', 's3':'108', 's4':'93', 's5':'95', 's6':'96', 's7':'479', 's8':'92',},
'92': {'s1':'135', 's2':'348', 's3':'109', 's4':'94', 's5':'96', 's6':'97', 's7':'480', 's8':'93',},
'93': {'s1':'136', 's2':'349', 's3':'110', 's4':'95', 's5':'97', 's6':'98', 's7':'481', 's8':'94',},
'94': {'s1':'137', 's2':'350', 's3':'111', 's4':'96', 's5':'98', 's6':'99', 's7':'482', 's8':'95',},
'95': {'s1':'138', 's2':'351', 's3':'112', 's4':'97', 's5':'99', 's6':'100', 's7':'483', 's8':'96',},
'96': {'s1':'139', 's2':'352', 's3':'113', 's4':'98', 's5':'100', 's6':'101', 's7':'484', 's8':'97',},
'97': {'s1':'140', 's2':'353', 's3':'114', 's4':'99', 's5':'101', 's6':'102', 's7':'485', 's8':'98',},
'98': {'s1':'141', 's2':'354', 's3':'115', 's4':'100', 's5':'102', 's6':'103', 's7':'486', 's8':'99',},
'99': {'s1':'142', 's2':'355', 's3':'116', 's4':'101', 's5':'103', 's6':'104', 's7':'487', 's8':'100',},
'100': {'s1':'143', 's2':'356', 's3':'117', 's4':'102', 's5':'104', 's6':'105', 's7':'488', 's8':'101',},
'101': {'s1':'144', 's2':'357', 's3':'118', 's4':'103', 's5':'105', 's6':'106', 's7':'489', 's8':'102',},
'102': {'s1':'145', 's2':'358', 's3':'119', 's4':'104', 's5':'106', 's6':'107', 's7':'490', 's8':'103',},
'103': {'s1':'146', 's2':'359', 's3':'120', 's4':'105', 's5':'107', 's6':'108', 's7':'491', 's8':'104',},
'104': {'s1':'147', 's2':'360', 's3':'121', 's4':'106', 's5':'108', 's6':'109', 's7':'492', 's8':'105',},
'105': {'s1':'148', 's2':'361', 's3':'122', 's4':'107', 's5':'109', 's6':'110', 's7':'493', 's8':'106',},
'106': {'s1':'149', 's2':'362', 's3':'123', 's4':'108', 's5':'110', 's6':'111', 's7':'494', 's8':'107',},
'107': {'s1':'150', 's2':'363', 's3':'124', 's4':'109', 's5':'111', 's6':'112', 's7':'495', 's8':'108',},
'108': {'s1':'151', 's2':'364', 's3':'125', 's4':'110', 's5':'112', 's6':'113', 's7':'496', 's8':'109',},
'109': {'s1':'152', 's2':'365', 's3':'126', 's4':'111', 's5':'113', 's6':'114', 's7':'497', 's8':'110',},
'110': {'s1':'153', 's2':'366', 's3':'127', 's4':'112', 's5':'114', 's6':'115', 's7':'498', 's8':'111',},
'111': {'s1':'154', 's2':'367', 's3':'128', 's4':'113', 's5':'115', 's6':'116', 's7':'499', 's8':'112',},
'112': {'s1':'155', 's2':'368', 's3':'129', 's4':'114', 's5':'116', 's6':'117', 's7':'500', 's8':'113',},
'113': {'s1':'156', 's2':'369', 's3':'130', 's4':'115', 's5':'117', 's6':'118', 's7':'501', 's8':'114',},
'114': {'s1':'157', 's2':'370', 's3':'131', 's4':'116', 's5':'118', 's6':'119', 's7':'502', 's8':'115',}
}
bbc = {'22':'11', 'n':{
'55':'56',
'55':'58',
'55':'59',
'55':'555',
}}
hmuchun = {
'hm5':{
'rep':257,
'rep2':287,
},
'hm6':{
'rep':288,
'rep2':317,
},
'hm7':{
'rep':317,
'rep2':347,
},
'hm8':{
'rep':347,
'rep2':371,
},
'hm9':{
'rep':18,
'rep2':48,
},
'hm10':{
'rep':48,
'rep2':78,
},
'hm11':{
'rep':78,
'rep2':108,
},
'hm12':{
'rep':108,
'rep2':137,
},
}
| 74.244444 | 112 | 0.348623 | [
"BSD-2-Clause"
] | SarvarRaxmonov/december-2 | pipuchun/jsonuz.py | 13,364 | Python |
a, b = raw_input().split()
a = int(a)
b = int(b)
if b > a:
print(b)
else:
    print(a)
"MIT"
] | KelwinKomka/URI | src/UOJ_1933 - (3425561) Accepted.py | 93 | Python |
#!/usr/bin/python2
import re, sys
from pygments import highlight
from pygments.lexers import get_lexer_by_name, JavascriptLexer, FactorLexer
from pygments.formatters import HtmlFormatter
from pygments.token import *
from pygments.lexer import RegexLexer
class UnlambdaLexer(RegexLexer):
name = 'Unlambda'
aliases = ['unlambda']
filenames = ['*.u']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'd', Comment.Preproc),
(r'\..', Generic.Output),
(r'[sk]', Keyword.Declaration),
(r'[cv]', Keyword.Type),
(r'i', Keyword.Constant),
(r'[@ried|?]', Keyword.Pseudo),
(r'`', Operator),
(r'.', Text),
]
}
class QBasicLexer(RegexLexer):
name = 'QBasic'
aliases = ['qbasic']
filenames = ['*.bas']
tokens = {
'root': [
(r'\'.*\n', Comment.Single),
(r'\"[^"]*\"', Literal.String),
(r'&H[\da-fA-F]+|\d*\.\d+|\d+', Literal.Number),
(r'[-+*/<>=\\]', Operator),
(r'[()\[\]]', Punctuation),
(r'\b(AND|AS|CASE|CONST|DATA|DECLARE|DEF|DEFINT|DIM|DO|ELSE|END|EXIT|FOR|FUNCTION|GOSUB|GOTO|IF|INPUT|LINE|LOOP|MOD|NEXT|NOT|OR|POKE|PRINT|RESTORE|RETURN|SEG|SELECT|SHARED|STATIC|STEP|SUB|TAB|THEN|TO|TYPE|UNTIL|USING|VIEW|WEND|WHILE|XOR)\b', Keyword),
(r'^([a-zA-Z][a-zA-Z0-9_]*:|\d+)', Name.Label),
(r'[a-zA-Z_][a-zA-Z0-9_]*(\$|%|#|&|!)?', Name.Variable),
(r'.', Text),
]
}
class LOLCODELexer(RegexLexer):
name = 'LOLCODE'
aliases = ['lolcode']
filenames = ['*.bas']
tokens = {
'root': [
(r'^OBTW\b.*?\bTLDR\b', Comment.Multiline),
(r'\bBTW\b.*\n', Comment.Single),
(r'\b(NERFIN|YA\s+RLY|BUKKIT|IS\s+NOW\s+A|MEBBE|GIMMEH|TIL|UPPIN|MKAY|TROOF|INTA|YR|!|NUMBR|OMG|NUMBAR|IF\s+U\s+SAY\s+SO|YARN|VISIBLE|I\s+HAS\s+A|IM\s+OUTTA\s+YR|IM\s+IN\s+YR|A|HAI|NO\s+WAI|GTFO|AN|R|FOUND\s+YR|OMGWTF|FAIL|O\s+RLY?|WTF\?|NOOB|HOW\s+DUZ\s+I|WIN|MAEK|OIC|PUTZ|KTHXBYE|ITZ|WILE|AT)(\b|(?=\s))', Keyword),
(r'\b(NOT|LENGZ\s+OF|CHARZ\s+OF|ORDZ\s+OF|SUM\s+OF|DIFF\s+OF|PRODUKT\s+OF|QUOSHUNT\s+OF|MOD\s+OF|BIGGR\s+OF|SMALLR\s+OF|BOTH\s+OF|EITHER\s+OF|WON\s+OF|BOTH\s+SAEM|DIFFRINT|ALL\s+OF|ANY\s+OF|SMOOSH|N)\b', Operator.Word),
(r'"(?::(?:[)>o":]|\([\dA-Fa-f]+\)|\{[A-Za-z]\w*\}|\[[^\[\]]+\])|[^":])*"', Literal.String),
(r'-?(\d+|\d+\.\d*|\.\d+)', Literal.Number),
(r'[a-zA-Z]\w*', Name.Variable),
(r',', Punctuation),
(r'.', Text),
]
}
class BloopLexer(RegexLexer):
name = 'Bloop'
aliases = ['bloop']
filenames = ['*.bloop']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'/\*.*?\*/', Comment.Multiline),
(r"'[^']*'", Literal.String),
(r'-?\d+', Literal.Number),
(r'\b(DEFINE|PROCEDURE|BLOCK|LOOP|AT|MOST|TIMES|MU_LOOP|CELL|OUTPUT|YES|NO|QUIT|ABORT|IF|THEN|AND|OR|PRINT|BEGIN|END)(\b|(?=\s))', Keyword),
(r'[A-Z]\w*', Name),
(r'[+*!=<>(){}":;,.-\[\]]', Punctuation),
(r'.', Text),
]
}
class EmoticonLexerHelper(RegexLexer):
tokens = {
'root': [
(r'\*\*([^*]|\*[^*])*\*\*', Comment),
(r'\S+[OC<>\[\]VD@PQ7L#${}\\/()|3E*]((?=\s)|$)', Keyword),
(r'\S+', Literal.String),
(r'-?\d+', Literal.Number),
(r'.', Text),
]
}
class EmoticonLexer(EmoticonLexerHelper):
name = 'Emoticon'
aliases = ['emoticon']
filenames = ['*.emo']
def get_tokens_unprocessed(self, text):
for index, token, value in EmoticonLexerHelper.get_tokens_unprocessed(self, text):
if token is Keyword:
yield index, Name, value[:-2]
yield index + len(value) - 2, Operator, value[-2]
                yield index + len(value) - 1, Keyword, value[-1]
else:
yield index, token, value
class KaffeineLexer(JavascriptLexer):
name = 'Kaffeine'
aliases = ['kaffeine']
filenames = ['*.k']
def get_tokens_unprocessed(self, text):
for index, token, value in JavascriptLexer.get_tokens_unprocessed(self, text):
if token is Error and value in ['#', '@']:
token_type = Name.Tag if value == '#' else Keyword
yield index, token_type, value
else:
yield index, token, value
class JavascriptNextLexer(JavascriptLexer):
name = 'Javascript.next'
aliases = ['javascript.next', 'traceur']
filenames = ['*.jsn']
EXTRA_KEYWORDS = ['let', 'yield']
def get_tokens_unprocessed(self, text):
for index, token, value in JavascriptLexer.get_tokens_unprocessed(self, text):
if token is Name.Other and value in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
class MoveLexer(JavascriptLexer):
name = 'Move'
aliases = ['move']
filenames = ['*.mv']
class ForthLexer(FactorLexer):
name = 'Forth'
aliases = ['forth']
filenames = ['*.4th']
class RoyLexer(RegexLexer):
name = 'Roy'
aliases = ['roy']
filenames = ['*.roy']
tokens = {
'root': [
(r'//.*\n', Comment.Single),
(r'\b(true|false|let|fn|if|then|else|data|type|match|case|do|return|macro|with)\b', Keyword),
(r'-?\d+', Literal.Number),
(r'\"[^"]*\"', Literal.String),
(r'<-|->|=|==|!=|\*|\+\+|\\', Operator),
(r'.', Text)
]
}
class APLLexer(RegexLexer):
name = 'APL'
aliases = ['apl']
filenames = ['*.apl']
tokens = {
'root': [
(r'.+', Text)
]
}
def getLexer(lexer_name):
lexers = [value for name, value in globals().items()
if name.endswith('Lexer') and hasattr(value, 'aliases')]
for lexer in lexers:
if lexer_name in lexer.aliases:
return lexer()
return get_lexer_by_name(lexer_name)
def main():
if len(sys.argv) == 2:
lexer = getLexer(sys.argv[1])
if lexer:
result = highlight(sys.stdin.read().decode('utf8'), lexer, HtmlFormatter())
result = result.replace('<div class="highlight"><pre>', '')
result = result.replace('</pre></div>', '')
print result.strip().encode('utf8')
else:
print 'Unknown language:', sys.argv[1]
else:
print 'Usage: pyg.py language < code.txt'
if __name__ == '__main__':
main()
| 30.744898 | 324 | 0.578659 | [
"MIT"
] | 1930sc/repl.it | pyg.py | 6,026 | Python |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common conv layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from lingvo.core import base_layer
from lingvo.core import bn_layers
from lingvo.core import py_utils
from lingvo.core import tshape
def ComputeConvOutputShape(in_shape,
t_stride,
f_stride,
outc=None,
padding='SAME'):
"""Computes output shape for convolution and pooling layers.
If `in_shape` is a dynamic shape, the output will be Tensors, while if
`in_shape` is a list of ints then the output will also be a list of ints.
Args:
in_shape: A length 4 Tensor or list representing the input shape.
t_stride: The stride along the time dimension.
f_stride: The stride along the frequency dimension.
outc: The expected output channel. If None, will use the input channel.
padding: 'SAME' or 'VALID'.
Returns:
The expected output shape.
"""
# In the order of batch, time, frequency, channel
n = in_shape[0]
t = in_shape[1]
f = in_shape[2]
c = in_shape[3]
# Last two dimensions has to be specified.
assert f is not None and c is not None
if padding == 'VALID':
if t:
t -= t_stride - 1
f -= f_stride - 1
ot = t
if ot is not None:
ot = (ot + t_stride - 1) // t_stride
of = (f + f_stride - 1) // f_stride
if outc is None:
outc = c
return [n, ot, of, outc]
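# Worked example (illustrative numbers): for in_shape = [8, 100, 40, 3] with
# t_stride=3, f_stride=2 and outc=32, 'SAME' padding gives [8, 34, 20, 32]
# ((100 + 2) // 3 = 34), while 'VALID' padding first shrinks time to 98 and
# frequency to 39, giving [8, 33, 20, 32] ((98 + 2) // 3 = 33).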
def ComputeConvOutputPadding(paddings, window, stride,
padding_algorithm='SAME'):
"""Computes paddings for convolution and pooling output.
out_padding[i] == 1 iff any in_padding corresponding to that output is 1.
Args:
paddings: The paddings tensor. It is expected to be of shape [batch, time].
window: The size of the windows.
stride: The time-stride between adjacent windows.
padding_algorithm: 'SAME' or 'VALID'.
Returns:
out_padding, The new padding tensor of size [batch, ceil(time / stride)].
"""
if stride == 1:
return paddings
# Pad so input_length divides stride.
input_length = py_utils.GetShape(paddings)[1]
pad_len = (input_length + stride - 1) // stride * stride - input_length
paddings = tf.pad(paddings, [[0, 0], [0, pad_len]], constant_values=1.0)
out_padding = tf.nn.pool(
tf.expand_dims(paddings, -1),
[window],
'MAX',
padding_algorithm,
strides=[stride],
)
return tf.squeeze(out_padding, -1)
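# Worked example (illustrative): paddings of shape [B, 10] with window=3 and
# stride=3 are right-padded with ones to length 12 and max-pooled, producing an
# output of shape [B, 4] (= ceil(10 / 3)); an output position is padded (1.0)
# iff any input position it covers was padded.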
class BaseConv2DLayerWithPadding(base_layer.BaseLayer):
"""Base class for 2D convolution layers."""
@classmethod
def Params(cls):
p = super(BaseConv2DLayerWithPadding, cls).Params()
p.Define(
'filter_shape', (0, 0, 0, 0),
'Filter shape. Must be a sequence of length 4. Elements are in'
' the order of height (time), width (frequency), in_channel,'
' out_channel. For causal convolution, filter_shape[0]'
' is the actual number of trained weights in the time dimension'
' of the kernel.')
p.Define(
'filter_stride', (1, 1),
'Filter stride to use. Must be a pair of ints. The first int'
' specifies the stride on the time dimension. The second int'
' specifies the stride on the frequency dimension.')
p.Define(
'dilation_rate', (1, 1),
'If > 1, dilation rate for atrous convolution. '
'Must be a pair of ints. '
'The first int specifies the dilation rate on the time dimension. '
'The second int specifies the dilation rate on the frequency '
'dimension. '
'If any value of dilation_rate is > 1, then all values of strides '
'must be 1.')
p.Define(
'weight_norm', False,
'If true, apply weight normalization to weights as proposed by'
' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868')
return p
@base_layer.initializer
def __init__(self, params):
super(BaseConv2DLayerWithPadding, self).__init__(params)
p = self.params
assert p.name
assert len(p.filter_shape) == 4
assert len(p.filter_stride) == 2
assert all(x > 0 for x in p.filter_shape)
assert all(x > 0 for x in p.filter_stride)
assert len(p.dilation_rate) == 2
assert all(x > 0 for x in p.dilation_rate)
# Dilation and stride can't be combined.
if any(x > 1 for x in p.dilation_rate):
assert all(x == 1 for x in p.filter_stride)
@property
def output_channels(self):
"""The number of output channels for this conv layer."""
raise NotImplementedError()
@property
def input_channels(self):
"""The number of input channels for this conv layer."""
return self.params.filter_shape[2]
def OutShape(self, in_shape):
"""Compute the output shape given the input shape."""
p = self.params
return ComputeConvOutputShape(in_shape, p.filter_stride[0],
p.filter_stride[1], self.output_channels)
def FProp(self, theta, inputs, paddings):
"""Apply convolution to inputs.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs tensor. It is expected to be of shape [batch, time,
frequency, channel]. The time dimension corresponds to the height
dimension as in images and the frequency dimension corresponds to the
width dimension as in images.
paddings: The paddings tensor, expected to be of shape [batch, time].
Returns:
outputs, out_paddings pair.
"""
p = self.params
with tf.name_scope(p.name):
inputs = py_utils.with_dependencies([
py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]),
py_utils.assert_shape_match(
tf.shape(inputs),
tf.concat([tf.shape(paddings), [-1, self.input_channels]], 0))
], inputs)
def _ApplyPadding(tensor_in, padding_in):
padding_expanded = tf.expand_dims(tf.expand_dims(padding_in, -1), -1)
return tensor_in * (1.0 - padding_expanded)
# Zeroing out padded inputs.
inputs = _ApplyPadding(inputs, paddings)
# Evaluate the conv kernel on 'inputs'.
out = self._EvaluateConvKernel(theta, inputs)
# NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1.
# But there's likely no real problems. Trying to set it gives an error:
# pooling with SAME padding is not implemented for dilation_rate > 1.
# NOTE: we use window=p.filter_stride[0] to be compatible with legacy
# implementation. Consider updating it to be the actual shape.
conv_padding = ComputeConvOutputPadding(
paddings, window=p.filter_stride[0], stride=p.filter_stride[0])
# Assuming padded nodes will be properly zero-ed out if necessary by
# sub-sequent layers.
# out = _ApplyPadding(out, conv_padding)
out = py_utils.HasShape(out, self.OutShape(tf.shape(inputs)))
return out, conv_padding
def _EvaluateConvKernel(self, theta, conv_input):
"""Evaluate the convolution kernel on input 'conv_input'."""
raise NotImplementedError
class Conv2DLayerWithPadding(BaseConv2DLayerWithPadding):
"""Conv2D layer."""
@base_layer.initializer
def __init__(self, params):
super(Conv2DLayerWithPadding, self).__init__(params)
p = self.params
assert p.name
w_pc = py_utils.WeightParams(
shape=p.filter_shape,
init=p.params_init,
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
with tf.variable_scope(p.name):
self.CreateVariable('w', w_pc)
if p.weight_norm:
self.CreateVariable(
'g',
py_utils.WeightParams(
shape=[p.filter_shape[-1]],
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars']))
@property
def output_channels(self):
"""The number of output channels for this conv layer."""
p = self.params
return p.filter_shape[-1]
def _GetWeight(self, theta):
p = self.params
if p.weight_norm:
# Normalize along the last dim (standard conv).
filter_w = tf.nn.l2_normalize(theta.w, [0, 1, 2]) * tf.reshape(
(theta.g + 1.0), [1, 1, 1, p.filter_shape[-1]])
else:
filter_w = theta.w
return filter_w
def _EvaluateConvKernel(self, theta, inputs):
"""Apply convolution to inputs."""
p = self.params
filter_w = self._GetWeight(theta)
return tf.nn.convolution(
inputs,
filter_w,
strides=p.filter_stride,
dilation_rate=p.dilation_rate,
data_format='NHWC',
padding='SAME')
class CausalConv2DLayerWithPadding(Conv2DLayerWithPadding):
"""2D conv layer with causal dependency on the time axis."""
@base_layer.initializer
def __init__(self, params):
super(CausalConv2DLayerWithPadding, self).__init__(params)
p = self.params
assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.'
def _EvaluateConvKernel(self, theta, inputs):
"""Apply convolution to inputs."""
p = self.params
assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.'
# Use VALID padding and shift the inputs to the right to ensure that the
# first output only depends on the first input and so on. The output is
# the same size as the input, as if the convolution used SAME padding.
padding_algorithm = 'VALID'
# The effective spatial filter width for dilated convolutions is
# (kernel_width - 1) * dilation_rate + 1 as according to
# https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0]
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.convolution(
inputs,
filter_w,
strides=p.filter_stride,
dilation_rate=p.dilation_rate,
data_format='NHWC',
padding=padding_algorithm)
class DepthwiseConv2DLayer(BaseConv2DLayerWithPadding):
"""Depthwise conv 2D layer.
paper: https://arxiv.org/abs/1610.02357
"""
@classmethod
def Params(cls):
p = super(DepthwiseConv2DLayer, cls).Params()
# Redefine 'filter_shape' since the semantic of shape elements is different
# from regular Conv2D.
p.Delete('filter_shape')
p.Define(
'filter_shape', (0, 0, 0, 0),
'Filter shape. Must be a sequence of length 4. Elements are in'
' the order of height (time), width (frequency), in_channel,'
' channel_multipliers. ')
return p
@base_layer.initializer
def __init__(self, params):
super(DepthwiseConv2DLayer, self).__init__(params)
p = self.params
assert p.name
w_pc = py_utils.WeightParams(
shape=p.filter_shape,
init=p.params_init,
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
with tf.variable_scope(p.name):
self.CreateVariable('w', w_pc)
if p.weight_norm:
self.CreateVariable(
'g',
py_utils.WeightParams(
shape=[p.filter_shape[2], p.filter_shape[3]],
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars']))
@property
def output_channels(self):
"""The number of output channels for this conv layer."""
p = self.params
# Depthwise convolution filter shape is:
# [..., in_channels, channel_multiplier].
return p.filter_shape[2] * p.filter_shape[3]
def _GetWeight(self, theta):
p = self.params
if p.weight_norm:
# Normalize along the last two dims.
filter_w = tf.nn.l2_normalize(theta.w, [0, 1]) * tf.reshape(
(theta.g + 1.0), [1, 1, p.filter_shape[2], p.filter_shape[3]])
else:
filter_w = theta.w
return filter_w
def _EvaluateConvKernel(self, theta, inputs):
"""Apply convolution to inputs."""
p = self.params
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(
inputs,
filter_w,
strides=[1, p.filter_stride[0], p.filter_stride[1], 1],
rate=p.dilation_rate,
data_format='NHWC',
padding='SAME')
class CausalDepthwiseConv2DLayer(DepthwiseConv2DLayer):
"""Depthwise conv layer with causal dependency on the time axis."""
@base_layer.initializer
def __init__(self, params):
super(CausalDepthwiseConv2DLayer, self).__init__(params)
p = self.params
assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.'
def _EvaluateConvKernel(self, theta, inputs):
"""Apply convolution to inputs."""
p = self.params
assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.'
# Use VALID padding and shift the inputs to the right to ensure that the
# first output only depends on the first input and so on. The output is
# the same size as the input, as if the convolution used SAME padding.
padding_algorithm = 'VALID'
# The effective spatial filter width for dilated convolutions is
# (kernel_width - 1) * dilation_rate + 1 as according to
# https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0]
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(
inputs,
filter_w,
strides=[1, p.filter_stride[0], p.filter_stride[1], 1],
rate=p.dilation_rate,
data_format='NHWC',
padding=padding_algorithm)
class NormalizedDepthwiseConv2DLayer(DepthwiseConv2DLayer):
"""DepthwiseConv2DLayer where weights are normalized over the time dim.
https://arxiv.org/abs/1901.10430
"""
@classmethod
def Params(cls):
p = super(NormalizedDepthwiseConv2DLayer, cls).Params()
p.Define('dropconnect_prob', 0.0,
'Prob at which DropConnect regularization is performed.')
    p.Define('deterministic_dropout', False, 'Use deterministic dropout or not.')
p.Define('temperature', 1.0,
'Temperature for the softmax normalization of the weights.')
p.Define('weight_tiling_factor', 1,
'Number of times weights are tiled over the input channels.')
return p
@base_layer.initializer
def __init__(self, params):
super(NormalizedDepthwiseConv2DLayer, self).__init__(params)
p = self.params
assert p.filter_shape[1] == 1, 'Only 1d convolution is supported.'
assert p.temperature > 0.0, 'Absolute zero temperature is not possible.'
@property
def output_channels(self):
"""The number of output channels for this conv layer."""
p = self.params
# Depthwise convolution filter shape is:
# [kernel_size, 1, in_channels, channel_multiplier].
return p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor
@property
def input_channels(self):
"""The number of output channels for this conv layer."""
p = self.params
return p.filter_shape[2] * p.weight_tiling_factor
def _GetWeight(self, theta):
p = self.params
filter_w = theta.w
# First normalize filter_w over the temporal dimension here.
filter_w = tf.nn.softmax(filter_w / p.temperature, axis=0)
# Add dropconnect on the weights for regularization.
if p.dropconnect_prob > 0.0 and not p.is_eval:
if p.deterministic_dropout:
filter_w = py_utils.DeterministicDropout(
filter_w, 1.0 - p.dropconnect_prob,
py_utils.GenerateStepSeedPair(p, theta.global_step))
else:
filter_w = tf.nn.dropout(
filter_w, 1.0 - p.dropconnect_prob, seed=p.random_seed)
    # Tie parameters across channels by tiling the weights weight_tiling_factor
    # times along the input-channel dimension.
filter_w = tf.tile(filter_w, [1, 1, p.weight_tiling_factor, 1])
return filter_w
@classmethod
def FPropMeta(cls, p, inputs, paddings):
py_utils.CheckShapes((inputs, paddings))
b, t, f, ic = inputs
assert f == 1
oc = p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor
outputs = tshape.Shape([b, t, f, oc])
flops = b * t * f * p.filter_shape[0] * ic * oc * 5
return py_utils.NestedMap(flops=flops, out_shapes=(outputs, paddings))
class CausalNormalizedDepthwiseConv2DLayer(NormalizedDepthwiseConv2DLayer):
"""Depthwise conv layer with causal dependency on the time axis."""
def _EvaluateConvKernel(self, theta, inputs):
"""Apply convolution to inputs."""
# Same as CausalDepthwiseConv2DLayer.
p = self.params
assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.'
padding_algorithm = 'VALID'
causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0]
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(
inputs,
filter_w,
strides=[1, p.filter_stride[0], p.filter_stride[1], 1],
rate=p.dilation_rate,
data_format='NHWC',
padding=padding_algorithm)
class ConvBatchNormLayer(bn_layers.BatchNormLayer):
"""A wrapper around regular BatchNormLayer that pass around the ...
paddings layers.
"""
def FProp(self, theta, inputs, paddings):
paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1)
bned = super(ConvBatchNormLayer, self).FProp(
theta, inputs, paddings_expanded)
return bned, paddings
# Supported activation functions.
_ACTIVATIONS = {
'RELU': tf.nn.relu,
'RELU6': tf.nn.relu6,
'SIGMOID': tf.sigmoid,
'TANH': tf.tanh,
'SWISH': tf.nn.swish,
'NONE': tf.identity,
}
class ActivationLayer(base_layer.BaseLayer):
"""Applies activation function to the inputs."""
@classmethod
def Params(cls):
p = super(ActivationLayer, cls).Params()
p.Define('activation', 'RELU',
'The activation function to apply')
return p
def FProp(self, theta, inputs, paddings):
p = self.params
out = _ACTIVATIONS[p.activation](inputs)
return out, paddings
class PaddingLayer(base_layer.BaseLayer):
"""Zeros out padded positions."""
def FProp(self, theta, inputs, paddings):
paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1)
return inputs * (1.0 - paddings_expanded), paddings
| 35.155393 | 80 | 0.663599 | [
"Apache-2.0"
] | zhoudoufu/lingvo | lingvo/core/conv_layers_with_time_padding.py | 19,230 | Python |
import torch
from recstudio.ann import sampler
from recstudio.data import dataset
from recstudio.model import basemodel, loss_func, scorer
r"""
HGN
########
Paper Reference:
Chen ma, et al. "HGN: Hierarchical Gating Networks for Sequential Recommendation" in KDD2019.
https://dl.acm.org/doi/abs/10.1145/3292500.3330984
"""
class HGNQueryEncoder(torch.nn.Module):
def __init__(self, fuid, fiid, num_users, embed_dim, max_seq_len, item_encoder, pooling_type='mean') -> None:
super().__init__()
self.fuid = fuid
self.fiid = fiid
self.item_encoder = item_encoder
self.pooling_type = pooling_type
self.user_embedding = torch.nn.Embedding(num_users, embed_dim, 0)
self.W_g_1 = torch.nn.Linear(embed_dim, embed_dim, bias=False)
self.W_g_2 = torch.nn.Linear(embed_dim, embed_dim, bias=False)
self.b_g = torch.nn.Parameter(torch.empty(embed_dim), requires_grad=True)
self.w_g_3 = torch.nn.Linear(embed_dim, 1, bias=False)
self.W_g_4 = torch.nn.Linear(embed_dim, max_seq_len)
def forward(self, batch):
U = self.user_embedding(batch[self.fuid])
S = self.item_encoder(batch['in_'+self.fiid])
S_F = S * torch.sigmoid(self.W_g_1(S) + self.W_g_2(U).view(U.size(0), 1, -1) + self.b_g)
weight = torch.sigmoid(self.w_g_3(S_F) + ([email protected]_g_4.weight[:S.size(1)].T).view(U.size(0), -1, 1)) # BxLx1
S_I = S_F * weight
if self.pooling_type == 'mean':
s = S_I.sum(1) / weight.sum(1)
elif self.pooling_type == 'max':
s = torch.max(S_I, dim=1).values
else:
raise ValueError("`pooling_type` only support `avg` and `max`")
query = U + s + S.sum(1)
return query
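# Tensor-shape sketch for the forward pass above (B = batch size, L = sequence
# length, D = embed_dim):
#   U: (B, D), S: (B, L, D)
#   feature gating  -> S_F:    (B, L, D)
#   instance gating -> weight: (B, L, 1), S_I: (B, L, D)
#   pooling         -> s:      (B, D)
#   query = U + s + S.sum(1):  (B, D)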
class HGN(basemodel.BaseRetriever):
r"""HGN proposes a hierarchical gating network, integrated with the Bayesian Personalized Ranking
(BPR) to capture both the long-term and short-term user interests. HGN consists of a feature
gating module, an instance gating module, and an item-item product module."""
def _get_dataset_class(self):
r"""The dataset is SeqDataset."""
return dataset.SeqDataset
def _get_query_encoder(self, train_data):
return HGNQueryEncoder(self.fuid, self.fiid, train_data.num_users, self.embed_dim, \
train_data.config['max_seq_len'], self.item_encoder, self.config['pooling_type'])
def _get_scorer_func(self):
return scorer.InnerProductScorer()
def _get_loss_func(self):
r"""BPR loss is used."""
return loss_func.BPRLoss()
def _get_sampler(self, train_data):
return sampler.UniformSampler(train_data.num_items-1)
| 36.797297 | 120 | 0.662505 | [
"MIT"
] | ustc-recsys/Torchrec | recstudio/model/seq/hgn.py | 2,723 | Python |
import time
import board
import debouncer
import busio as io
import digitalio
import pulseio
import adafruit_ssd1306
i2c = io.I2C(board.SCL, board.SDA)
reset_pin = digitalio.DigitalInOut(board.D11)
oled = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c, reset=reset_pin)
button_select = debouncer.Debouncer(board.D7, mode=digitalio.Pull.UP)
button_play = debouncer.Debouncer(board.D9, mode=digitalio.Pull.UP)
C4 = 261
C_SH_4 = 277
D4 = 293
D_SH_4 = 311
E4 = 329
F4 = 349
F_SH_4 = 369
G4 = 392
G_SH_4 = 415
A4 = 440
A_SH_4 = 466
B4 = 493
# pylint: disable=line-too-long
songbook = {'Twinkle Twinkle': [(C4, 0.5), (C4, 0.5), (G4, 0.5), (G4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 1.0), (0, 0.5),
(F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (D4, 0.5), (C4, 0.5), (0, 0.5),
(G4, 0.5), (G4, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5),
(G4, 0.5), (G4, 0.5), (F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5),
(C4, 0.5), (C4, 0.5), (G4, 0.5), (G4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 1.0), (0, 0.5),
(F4, 0.5), (F4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (D4, 0.5), (C4, 0.5), (0, 0.5)],
'ItsyBitsy Spider': [(G4, 0.5), (C4, 0.5), (C4, 0.5), (C4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (C4, 0.5), (D4, 0.5), (E4, 0.5), (C4, 0.5), (0, 0.5),
(E4, 0.5), (E4, 0.5), (F4, 0.5), (G4, 0.5), (G4, 0.5), (F4, 0.5), (E4, 0.5), (F4, 0.5), (G4, 0.5), (E4, 0.5), (0, 0.5)],
'Old MacDonald': [(G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5),
(B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5),
(D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5),
(B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5),
(D4, 0.5), (D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (D4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5),
(G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5),
(G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (G4, 0.5), (0, 0.5),
(G4, 0.5), (G4, 0.5), (G4, 0.5), (D4, 0.5), (E4, 0.5), (E4, 0.5), (D4, 0.5), (0, 0.5),
(B4, 0.5), (B4, 0.5), (A4, 0.5), (A4, 0.5), (G4, 0.5), (0, 0.5)]
}
# pylint: enable=line-too-long
def play_note(note):
if note[0] != 0:
pwm = pulseio.PWMOut(board.D12, duty_cycle = 0, frequency=note[0])
# Hex 7FFF (binary 0111111111111111) is half of the largest value for a 16-bit int,
# i.e. 50%
pwm.duty_cycle = 0x7FFF
time.sleep(note[1])
if note[0] != 0:
pwm.deinit()
def play_song(songname):
for note in songbook[songname]:
play_note(note)
def update(songnames, selected):
oled.fill(0)
line = 0
for songname in songnames:
if line == selected:
oled.text(">", 0, line * 8)
oled.text(songname, 10, line * 8)
line += 1
oled.show()
selected_song = 0
song_names = sorted(list(songbook.keys()))
while True:
button_select.update()
button_play.update()
update(song_names, selected_song)
if button_select.fell:
print("select")
selected_song = (selected_song + 1) % len(songbook)
elif button_play.fell:
print("play")
play_song(song_names[selected_song])
| 41.433333 | 185 | 0.448914 | [
"MIT"
] | ChrisKuhi/Adafruit_Learning_System_Guides | CircuitPython_101/basic_data_structures/song_book/code.py | 3,729 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-23 18:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('voting', '0002_auto_20170223_1054'),
]
operations = [
migrations.RemoveField(
model_name='voteballot',
name='vote_candidate',
),
migrations.AddField(
model_name='voteballot',
name='candidates',
field=models.ManyToManyField(related_name='vote_ballot', to='voting.Candidate', verbose_name="Vote's Candidate"),
),
migrations.AddField(
model_name='voteballot',
name='position',
field=models.CharField(choices=[('P', 'President'), ('A', 'VP of Administration'), ('T', 'Treasurer'), ('S', 'VP of Service'), ('N', 'VP of New Member Services'), ('O', 'VP of Social Affairs'), ('J', 'VP of Standards'), ('R', 'Risk Management'), ('B', 'Standards Board')], default='P', max_length=1),
),
]
| 35.433333 | 312 | 0.594544 | [
"MIT"
] | TexasLAN/texaslan.org | texaslan/voting/migrations/0003_auto_20170223_1207.py | 1,063 | Python |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Mantle Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool acceptance of raw transactions."""
from decimal import Decimal
from io import BytesIO
import math
from test_framework.test_framework import MantleTestFramework
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
CTransaction,
CTxOut,
MAX_BLOCK_BASE_SIZE,
MAX_MONEY,
)
from test_framework.script import (
hash160,
CScript,
OP_0,
OP_2,
OP_3,
OP_CHECKMULTISIG,
OP_EQUAL,
OP_HASH160,
OP_RETURN,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
hex_str_to_bytes,
)
class MempoolAcceptanceTest(MantleTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-txindex','-permitbaremultisig=0',
]] * self.num_nodes
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def check_mempool_result(self, result_expected, *args, **kwargs):
"""Wrapper to check result of testmempoolaccept on node_0's mempool"""
result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
assert_equal(result_expected, result_test)
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) # Must not change mempool state
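    # For reference, each result entry asserted in this test has one of these
    # shapes (not an exhaustive schema of testmempoolaccept output):
    #   {'txid': ..., 'allowed': True, 'vsize': ..., 'fees': {'base': Decimal}}
    #   {'txid': ..., 'allowed': False, 'reject-reason': '...'}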
def run_test(self):
node = self.nodes[0]
self.log.info('Start with empty mempool, and 200 blocks')
self.mempool_size = 0
assert_equal(node.getblockcount(), 200)
assert_equal(node.getmempoolinfo()['size'], self.mempool_size)
coins = node.listunspent()
self.log.info('Should not accept garbage to testmempoolaccept')
assert_raises_rpc_error(-3, 'Expected type array, got string', lambda: node.testmempoolaccept(rawtxs='ff00baar'))
assert_raises_rpc_error(-8, 'Array must contain exactly one raw transaction for now', lambda: node.testmempoolaccept(rawtxs=['ff00baar', 'ff22']))
assert_raises_rpc_error(-22, 'TX decode failed', lambda: node.testmempoolaccept(rawtxs=['ff00baar']))
self.log.info('A transaction already in the blockchain')
coin = coins.pop() # Pick a random coin(base) to spend
raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout']}],
outputs=[{node.getnewaddress(): 0.3}, {node.getnewaddress(): 49}],
))['hex']
txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, maxfeerate=0)
node.generate(1)
self.mempool_size = 0
self.check_mempool_result(
result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': 'txn-already-known'}],
rawtxs=[raw_tx_in_block],
)
self.log.info('A transaction not in the mempool')
fee = Decimal('0.000007')
raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}], # RBF is used later
outputs=[{node.getnewaddress(): Decimal('0.3') - fee}],
))['hex']
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee}}],
rawtxs=[raw_tx_0],
)
self.log.info('A final transaction not in the mempool')
coin = coins.pop() # Pick a random coin(base) to spend
output_amount = Decimal('0.025')
raw_tx_final = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout'], "sequence": 0xffffffff}], # SEQUENCE_FINAL
outputs=[{node.getnewaddress(): output_amount}],
locktime=node.getblockcount() + 2000, # Can be anything
))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_final)))
fee_expected = coin['amount'] - output_amount
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee_expected}}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
node.sendrawtransaction(hexstring=raw_tx_final, maxfeerate=0)
self.mempool_size += 1
self.log.info('A transaction in the mempool')
node.sendrawtransaction(hexstring=raw_tx_0)
self.mempool_size += 1
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'txn-already-in-mempool'}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that replaces a mempool transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(fee * COIN) # Double the fee
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER + 1 # Now, opt out of RBF
raw_tx_0 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': (2 * fee)}}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that conflicts with an unconfirmed tx')
# Send the transaction that replaces the mempool transaction and opts out of replaceability
node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0)
# take original raw_tx_0
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(4 * fee * COIN) # Set more fee
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'txn-mempool-conflict'}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
self.log.info('A transaction with missing inputs, that never existed')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout = COutPoint(hash=int('ff' * 32, 16), n=14)
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with missing inputs, that existed once in the past')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout.n = 1 # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend
raw_tx_1 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, maxfeerate=0)
# Now spend both to "clearly hide" the outputs, ie. remove the coins from the utxo set by spending them
raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[
{'txid': txid_0, 'vout': 0},
{'txid': txid_1, 'vout': 0},
],
outputs=[{node.getnewaddress(): 0.1}]
))['hex']
txid_spend_both = node.sendrawtransaction(hexstring=raw_tx_spend_both, maxfeerate=0)
node.generate(1)
self.mempool_size = 0
# Now see if we can add the coins back to the utxo set by sending the exact txs again
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_0],
)
self.check_mempool_result(
result_expected=[{'txid': txid_1, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_1],
)
self.log.info('Create a signed "reference" tx for later use')
raw_tx_reference = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': txid_spend_both, 'vout': 0}],
outputs=[{node.getnewaddress(): 0.05}],
))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
# Reference tx should be valid on itself
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': { 'base': Decimal('0.1') - Decimal('0.05')}}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
self.log.info('A transaction with no outputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = []
# Skip re-signing the transaction for context independent checks from now on
# tx.deserialize(BytesIO(hex_str_to_bytes(node.signrawtransactionwithwallet(tx.serialize().hex())['hex'])))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-empty'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A really large transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_BASE_SIZE / len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-oversize'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with negative output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue *= -1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-negative'}],
rawtxs=[tx.serialize().hex()],
)
# The following two validations prevent overflow of the output amounts (see CVE-2010-5139).
self.log.info('A transaction with too large output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue = MAX_MONEY + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-toolarge'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with too large sum of output values')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = [tx.vout[0]] * 2
tx.vout[0].nValue = MAX_MONEY
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-txouttotal-toolarge'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with duplicate inputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-inputs-duplicate'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A coinbase transaction')
# Pick the input of the first tx we signed, so it has to be a coinbase tx
raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_coinbase_spent)))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'coinbase'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('Some nonstandard transactions')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.nVersion = 3 # A version currently non-standard
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'version'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_0]) # Some non-standard script
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptpubkey'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
tx.vout[0].scriptPubKey = CScript([OP_2, pubkey, pubkey, pubkey, OP_3, OP_CHECKMULTISIG]) # Some bare multisig script (2-of-3)
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bare-multisig'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].scriptSig = CScript([OP_HASH160]) # Some not-pushonly scriptSig
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptsig-not-pushonly'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].scriptSig = CScript([b'a' * 1648]) # Some too large scriptSig (>1650 bytes)
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptsig-size'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=CScript([OP_HASH160, hash160(b'burn'), OP_EQUAL]))
num_scripts = 100000 // len(output_p2sh_burn.serialize()) # Use enough outputs to make the tx too large for our policy
tx.vout = [output_p2sh_burn] * num_scripts
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'tx-size'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0] = output_p2sh_burn
tx.vout[0].nValue -= 1 # Make output smaller, such that it is dust for our policy
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'dust'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
tx.vout = [tx.vout[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'multi-op-return'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A timelocked transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence -= 1 # Should be non-max, so locktime is not ignored
tx.nLockTime = node.getblockcount() + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'non-final'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction that is locked by BIP68 sequence logic')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence = 2 # We could include it in the second block mined from now, but not the very next one
# Can skip re-signing the tx because of early rejection
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'non-BIP68-final'}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
if __name__ == '__main__':
MempoolAcceptanceTest().main()
| 48.050296 | 154 | 0.634382 | [
"MIT"
] | Mantle-One/mantlecoin | test/functional/mempool_accept.py | 16,241 | Python |
#!/usr/bin/env python
import os
import sys
import argparse
import subprocess
import glob
import math
from EMAN2 import *
def file_base(movie):
# return the filename and basename, exclude '.p3'
return movie, os.path.basename(os.path.splitext(movie)[0]).replace('.p3', '')
def check(log,c_p):
with open(log) as log_r:
lines = [line for line in log_r]
x0 = 0
y0 = 0
f = c_p['throw']
bad = []
while len(lines) > 0:
line1 = lines.pop(0)
if "...... Frame (" in line1:
line = line1.strip().split()
x = float(line[-2])
y = float(line[-1])
if math.sqrt((x - x0)**2 + (y - y0)**2) * c_p['apixr'] > c_p['target']:
bad += [f]
f += 1
x0 = x
y0 = y
return bad
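# A worked example of the threshold above (using this script's default options,
# for illustration only): with -ar 0.6575 A/pixel and -ta 5 A, a frame whose
# shift relative to the previous frame exceeds 5 / 0.6575 ~ 7.6 pixels is
# flagged as bad; decide() then re-runs MotionCor2 throwing all leading frames
# up to the last bad one, provided that does not exceed c_p['maxthrow'].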
def run_motioncor2(movie, c_p):
movie, basename = file_base(movie)
# generate the com file
out = basename+'_throw{:03}'.format(c_p['throw'])
o_com = out + '.com'
o_log = out + '.log'
o_mrc = out + '.mrc'
common = 'motioncor2 -InMrc {} -OutMrc {} -Iter 10 -Bft 100 -FtBin {} -Throw {} -FmRef -1 -Tilt {} {}'.format(movie,o_mrc,c_p['bin'],c_p['throw'],c_p['tilt'], c_p['gainref'])
with open(o_com, 'w') as o_com_w:
if c_p['local'] == 0:
o_com_w.write('{} -Patch 0 0'.format(common))
else:
o_com_w.write('{} -Patch {} {} -LogFile {} -FmDose {} -PixSize {} -kV {}'.format(common,c_p['patch'],c_p['patch'],out+'_',c_p['dose'],c_p['apixr'],c_p['voltage']))
# run the com
with open(o_log, 'w') as write_log:
subprocess.call(['sh', o_com], stdout=write_log, stderr=subprocess.STDOUT)
# check the shifts
bad = check(o_log,c_p)
# decide bad
decide(movie, bad, c_p)
def decide(movie, bad, c_p):
if bad == []:
if c_p['local'] == 0:
print "No bad frames. Do local now."
c_p['local'] = 1
run_motioncor2(movie, c_p)
else:
print "No bad frames. Local done for {}. Throwed the first {} frames.".format(movie, c_p['throw'])
elif max(bad) < c_p['maxthrow']:
c_p['throw'] = max(bad)
print "Throw the first {} frames.".format(c_p['throw']), "Bad frames: ", bad
run_motioncor2(movie, c_p)
else: # if too many bad frames
print '{} has too many bad frames: '.format(movie), bad
def main():
progname = os.path.basename(sys.argv[0])
usage = progname + """ [options] <movies>
Output unfiltered and filtered sum using MotionCor2.
Automatically discard bad frames.
Needs:
'motioncor2' command (v1, Zheng et al., 2017)
'EMAN2' python module (v2.11, Tang et al., 2007)
"""
args_def = {'apix':1.315, 'apixr':0.6575, 'bin':1, 'patch':5, 'voltage':300, 'time':200, 'rate':7, 'target':5, 'tilt':'0 0', 'gainref':''}
parser = argparse.ArgumentParser()
parser.add_argument("movie", nargs='*', help="specify movies (mrc, mrcs, dm4) to be processed")
parser.add_argument("-a", "--apix", type=float, help="specify counting apix, by default {}".format(args_def['apix']))
parser.add_argument("-ar", "--apixr", type=float, help="specify real apix of input movie, by default {}".format(args_def['apixr']))
parser.add_argument("-b", "--bin", type=float, help="specify binning factor, by default {}".format(args_def['bin']))
parser.add_argument("-p", "--patch", type=int, help="specify the patch, by default {}".format(args_def['patch']))
parser.add_argument("-v", "--voltage", type=int, help="specify the voltage (kV), by default {}".format(args_def['voltage']))
parser.add_argument("-t", "--time", type=float, help="specify exposure time per frame in ms, by default {}".format(args_def['time']))
parser.add_argument("-r", "--rate", type=float, help="specify dose rate in e/pix/s (counting pixel, not superresolution), by default {}".format(args_def['rate']))
parser.add_argument("-ta", "--target", type=float, help="specify the target resolution, by default {}".format(args_def['target']))
parser.add_argument("-ti", "--tilt", type=str, help="specify the tilt, by default {}".format(args_def['tilt']))
parser.add_argument("-g", "--gainref", type=str, help="specify the gainref option, by default {}. e.g., '-Gain ../14sep05c_raw_196/norm-amibox05-0.mrc -RotGain 0 -FlipGain 1'".format(args_def['gainref']))
args = parser.parse_args()
if len(sys.argv) == 1:
print "usage: " + usage
print "Please run '" + progname + " -h' for detailed options."
sys.exit(1)
# get default values
for i in args_def:
if args.__dict__[i] == None:
args.__dict__[i] = args_def[i]
# get common parameters
dose = args.time/1000.0 * args.rate / args.apix ** 2
voltage = args.voltage
c_p = {'dose':dose, 'apix':args.apix, 'apixr':args.apixr, 'bin':args.bin, 'patch':args.patch, 'voltage':voltage, 'target':args.target, 'tilt':args.tilt, 'throw':0, 'gainref':args.gainref}
# loop over all the input movies
for movie in args.movie:
if movie[-3:] == '.gz':
subprocess.call(['gunzip', movie])
movie = movie[:-3]
basename = os.path.basename(os.path.splitext(movie)[0])
suffix = os.path.basename(os.path.splitext(movie)[1])
basename_raw = basename
# unify mrc and mrcs to mrcs format
m = basename+'.p3.mrcs'
if suffix in ['.mrc','.mrcs']:
os.symlink(movie, m)
movie, basename = file_base(m)
# get nimg
c_p['nimg'] = EMUtil.get_image_count(movie)
# convert dm4 to mrcs
if suffix == '.dm4':
for i in xrange(c_p['nimg']):
d=EMData(movie, i)
d.write_image(m, i)
movie, basename = file_base(m)
# here we assume 36e is the maximal dose that still contributes to visualization of protein side chains, and a total of 20e is the minimum to ensure good alignment. therefore, you can throw the first 16e at most.
c_p['maxthrow'] = min(16/dose, c_p['nimg'] - 20/dose)
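		# Rough numbers with the defaults (-t 200 ms, -r 7 e/pix/s, -a 1.315 A/pix),
		# shown only as an illustration: dose ~ 0.2 * 7 / 1.315**2 ~ 0.81 e/A^2 per
		# frame, so maxthrow ~ min(19.8, nimg - 24.7): at most ~19 leading frames can
		# be thrown, and only while ~25 frames (>= 20 e) remain for alignment.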
# motioncor2
c_p['local'] = 0 #0 means no local, only global
c_p['throw'] = 0
run_motioncor2(movie, c_p)
# delete intermediate files, they contain '.p3.'
for i in glob.glob(basename_raw + '*.p3.*'):
os.unlink(i)
if __name__ == '__main__':
main()
| 41.092199 | 214 | 0.654298 | [
"MIT"
] | emkailu/PAT3DEM | bin/p3motioncor2.py | 5,794 | Python |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'tables/$', views.report_tables, name='tables'),
url(r'^api/stop_enforcement/', views.stop_enforcement_json_view, name='stop_enforcement'),
url(r'^api/residency/', views.resident_json_view, name='residency'),
url(r'^api/nature_of_stops/', views.nature_of_stops_json_view, name='nature_of_stop'),
url(r'^api/disposition/', views.disposition_json_view, name='disposition'),
url(r'^api/statutory_authority/', views.statutory_authority_json_view, name='stop_authority'),
url(r'^api/stops_by_month/', views.monthly_stops_json_view, name='stops_by_month'),
url(r'^api/stops_by_hour/', views.stops_by_hour_json_view, name='stops_by_hour'),
url(r'^api/stops_by_age/', views.stops_by_age_json_view, name='stops_by_age'),
url(r'^api/search_information/', views.search_information_json_view, name='search_information'),
url(r'^api/search_authority/', views.search_authority_json_view, name='search_authority'),
url(r'^api/traffic_stops/', views.traffic_stops_json_view, name='stops'),
url(r'^api/departments/', views.department_json_view, name='departments')
]
| 62.157895 | 100 | 0.751058 | [
"MIT"
] | CT-Data-Collaborative/ctrp3_v2 | ctrp3_py3/reports/urls.py | 1,181 | Python |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.extractor import FrontExtractorOp
from mo.front.kaldi.extractors.fixed_affine_component_ext import FixedAffineComponentFrontExtractor
from mo.front.kaldi.utils import read_learning_info
from mo.graph.graph import Node
class AffineComponentFrontExtractor(FrontExtractorOp):
op = 'affinecomponent'
enabled = True
@staticmethod
def extract(node: Node):
read_learning_info(node.parameters)
return FixedAffineComponentFrontExtractor.extract(node)
| 35.733333 | 99 | 0.785448 | [
"Apache-2.0"
] | AlexeyAB/dldt | model-optimizer/mo/front/kaldi/extractors/affine_component_ext.py | 1,072 | Python |
# import unittest
import pytest
# from ci_testing_python.app.identidock import app
if __name__ == '__main__':
# unittest.main()
pytest.main()
| 14.7 | 50 | 0.741497 | [
"MIT"
] | anirbanroydas/MChat-Mosquitto-MQTT | tests/contract/test_contract_identidock.py | 147 | Python |
def solve(n):
a = []
for _ in range(n):
name, h = input().split()
h = float(h)
a.append((name, h))
a.sort(key = lambda t: t[1], reverse=True)
m = a[0][1]
for n, h in a:
if h != m: break
print(n, end = " ")
print()
while True:
n = int(input())
if n == 0: break
solve(n)
| 18.473684 | 46 | 0.433048 | [
"MIT"
] | honux77/algorithm | prompt412/round-101/c.py | 351 | Python |
import numpy as np
import matplotlib.pyplot as plt
import time
import csv
import os
import scipy.io as mat4py
import logging
logger = logging.getLogger("logger")
class ResultBuffer(object):
def __init__(self, log_path, episode_types):
self.log_path = log_path
self.current_episode = None
self.episodes = {e_type: list() for e_type in episode_types}
self.average_reward = 0.0
self.initial_reward = 0.0
self.average_reward_counter = 0
self.n_cluster = 0
for episode_type in self.episodes.keys():
with open(os.path.join(self.log_path,'{}.csv'.format(episode_type)), mode='w') as result_file:
writer = csv.writer(result_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(self.title_csv())
def update_episode(self, **kwargs):
if self.current_episode is None:
raise ValueError("There is no initiated episodes object")
self.current_episode.add(**kwargs)
def add_episode(self, episode_type, lr, noise_std, buffer_size):
if episode_type in self.episodes.keys():
idx = len(self.episodes[episode_type])
episode_name = "{}_{:03d}".format(episode_type,idx)
self.episodes[episode_type].append(Episode(episode_name, lr, noise_std, buffer_size, self.average_reward))
self.current_episode = self.episodes[episode_type][-1]
else:
raise ValueError("Invalid episode type added to result buffer")
def finalize_episode(self, update_average_reward=None):
self.current_episode.summarize()
if update_average_reward is not None:
new_average = self.current_episode.final_stats['online_rewards']
if np.abs(new_average-self.initial_reward) > 0.05:
self.initial_reward = new_average
self.average_reward_counter = 0
self.average_reward = (self.average_reward_counter * self.average_reward + new_average) / (self.average_reward_counter + 1)
self.average_reward_counter += 1
logger.info(self.current_episode)
self.write_all()
def write_all(self):
for episode_type in self.episodes.keys():
with open(os.path.join(self.log_path,'{}.csv'.format(episode_type)), mode='a') as result_file:
writer = csv.writer(result_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i, episode in enumerate(self.episodes[episode_type]):
if episode is not None:
if "eval" in episode.name:
try:
episode.save(self.log_path)
except:
logger.info("Saving state evolution failed")
writer.writerow(episode.csv())
self.episodes[episode_type][i] = None
@staticmethod
def title():
text = list()
text.append('{:^20}'.format('Epi'))
text.append('{:^10}'.format('time'))
text.append('{:^9}'.format('lr'))
text.append('{:^9}'.format('noise'))
text.append('{:^12}'.format('buffer size'))
text.append('{:^9}'.format('#of updates'))
text.append('{:^20}'.format('average_reward'))
text.append('{:^20}'.format('actor grad norm'))
text.append('{:^20}'.format('critic grad norm'))
text.append('{:^9}'.format('q_loss'))
text.append('{:^6}'.format('rewards'))
return " | ".join(text)
@staticmethod
def title_csv():
text = list()
text.append('{}'.format('Epi'))
text.append('{}'.format('time'))
text.append('{}'.format('lr'))
text.append('{}'.format('noise'))
text.append('{}'.format('buffer size'))
text.append('{}'.format('#of updates'))
text.append('{}'.format('average_reward'))
text.append('{}'.format('actor grad norm'))
text.append('{}'.format('critic grad norm'))
text.append('{}'.format('q_loss'))
text.append('{}'.format('rewards'))
return text
class Episode(object):
def __init__(self, name, lr, noise_std, buffer_size, average_reward):
# general stats
self.name = name
self.average_reward = average_reward
self.lr = lr
self.noise_std = noise_std
self.buffer_size = buffer_size
self.total_time = time.time()
# training stats
self.stats = dict()
self.final_stats = dict()
def add(self, **kwargs):
for key,val in kwargs.items():
if key not in self.stats.keys():
self.stats[key] = list()
self.stats[key].append(val)
def summarize(self):
# updates counter
if 'global_step_critic' in self.stats.keys():
self.final_stats['global_step'] = self.stats['global_step_critic']
# average rewards
if 'online_rewards' in self.stats.keys():
self.stats['online_rewards'] = np.array(self.stats['online_rewards'])
self.stats['online_rewards'] = np.reshape(self.stats['online_rewards'], [self.stats['online_rewards'].shape[1], -1])
self.final_stats['online_rewards'] = np.mean(self.stats['online_rewards'][:,10:])
# value function error
if 'q_loss' in self.stats.keys():
self.final_stats['q_loss'] = np.mean(self.stats['q_loss'])
# state/action/disturbance evolution
if 'states' in self.stats.keys():
self.final_stats['states'] = np.transpose(np.squeeze(np.array(self.stats['states'])))
if 'actions' in self.stats.keys():
self.final_stats['actions'] = np.swapaxes(np.array(self.stats['actions']), 0, 1)
if 'disturbance' in self.stats.keys():
self.final_stats['disturbance'] = np.transpose(np.array(self.stats['disturbance']))
# gradient stats
if 'g_norm_critic' in self.stats.keys():
self.final_stats['g_norm_critic'] = (np.mean(np.squeeze(np.array(self.stats['g_norm_critic']))),
np.min(np.squeeze(np.array(self.stats['g_norm_critic']))),
np.max(np.squeeze(np.array(self.stats['g_norm_critic']))))
if 'g_norm_actor' in self.stats.keys():
self.final_stats['g_norm_actor'] = (np.mean(np.squeeze(np.array(self.stats['g_norm_actor']))),
np.min(np.squeeze(np.array(self.stats['g_norm_actor']))),
np.max(np.squeeze(np.array(self.stats['g_norm_actor']))))
if 'global_step_actor' in self.stats.keys():
self.final_stats['global_step'] = self.stats['global_step_actor'][-1]
self.total_time = time.time() - self.total_time
del self.stats
def save(self, path):
mat4py.savemat(os.path.join(path, "states", 'states_evol.mat'), {'states': self.final_stats['states']})
mat4py.savemat(os.path.join(path, "states", 'actions_evol.mat'), {'actions': self.final_stats['actions']})
mat4py.savemat(os.path.join(path, "states", 'outputs_evol.mat'), {'disturbance': self.final_stats['disturbance']})
def csv(self):
text = list()
text.append('{}'.format(self.name))
text.append('{:.1f}'.format(self.total_time))
if "eval" not in self.name:
text.append('{:.2e}'.format(self.lr))
text.append('{:.2e}'.format(self.noise_std))
text.append('{}'.format(self.buffer_size))
text.append('{}'.format(self.final_stats['global_step']))
text.append('{:^20}'.format(self.average_reward))
if "eval" not in self.name:
text.append('{}'.format(self.final_stats['g_norm_actor']))
text.append('{}'.format(self.final_stats['g_norm_critic']))
text.append('{:.2e}'.format(self.final_stats['q_loss']))
text.append('{:.5f}'.format(self.final_stats['online_rewards']))
return text
def __repr__(self):
text = list()
text.append('{:^20}'.format(self.name))
text.append('{:^10.1f}'.format(self.total_time))
if "eval" not in self.name:
text.append('{:^9.2e}'.format(self.lr))
text.append('{:^9.2e}'.format(self.noise_std))
text.append('{:^d}'.format(self.buffer_size))
text.append('{}'.format(self.final_stats['global_step']))
text.append('{:^20}'.format(self.average_reward))
if "eval" not in self.name:
mi, ma, mea = self.final_stats['g_norm_actor']
text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mi, ma, mea))
mi, ma, mea = self.final_stats['g_norm_critic']
text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mi, ma, mea))
text.append('{:^10.2e}'.format(self.final_stats['q_loss']))
if "pol" in self.name:
mi, ma, mea = self.final_stats['g_norm_critic']
text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mi, ma, mea))
text.append('{:^10.2e}'.format(self.final_stats['q_loss']))
if len(self.final_stats.keys()) > 0 :
text.append('{:^6.5f}'.format(self.final_stats['online_rewards']))
return " | ".join(text)
class Figure(object):
def __init__(self, name, log_path, y_data, x_data=None, options = None, labels = None):
self.fig = plt.figure()
self.fig.set_size_inches(18.5, 10.5)
for y in y_data:
plt.plot(x_data, y)
plt.legend(labels)
plt.title(" ".join(name.split("_")))
self.fig.savefig(os.path.join(log_path, "plots", name))
plt.close()
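# Rough usage sketch (argument values below are illustrative, not from any
# particular experiment):
#
#     buf = ResultBuffer(log_path='logs', episode_types=['train', 'eval'])
#     buf.add_episode('train', lr=1e-3, noise_std=0.1, buffer_size=50000)
#     buf.update_episode(online_rewards=rewards, q_loss=losses)  # once per step
#     buf.finalize_episode(update_average_reward=True)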
| 41.651064 | 135 | 0.581324 | [
"MIT"
] | zivaharoni/capacity-rl | result_buffer.py | 9,788 | Python |
"""Import a file from Illumina BaseSpace."""
import atexit
import gzip
import os
import time
import traceback
from pathlib import Path
from requests import RequestException, Session
from resolwe.process import (
BooleanField,
FileField,
GroupField,
IntegerField,
Persistence,
Process,
SecretField,
StringField,
)
class BaseSpaceDownloadError(Exception):
"""BaseSpace download error."""
pass
def download_file_repeatedly(
tries, session, file_id, file_name, expected_file_size, request_headers, error
):
"""Attempt to download BaseSpace file numerous times in case of errors."""
for i in range(tries):
try:
download_file(
session=session,
file_id=file_id,
file_name=file_name,
request_headers=request_headers,
error=error,
)
raise_for_file_corruption(
file_name=file_name, expected_file_size=expected_file_size, error=error
)
break
except BaseSpaceDownloadError:
if i + 1 == tries:
error("Could not download file from BaseSpace.")
else:
time.sleep(3)
def download_file(session, file_id, file_name, request_headers, error):
"""Download BaseSpace file."""
response = make_get_request(
session=session,
url=get_api_file_content_url(file_id=file_id),
headers=request_headers,
error=error,
stream=True,
)
try:
with open(file_name, "wb") as f:
chunk_size = 1024 * 1024 * 10
for chunk in response.iter_content(chunk_size=chunk_size):
f.write(chunk)
except FileNotFoundError:
error(f"Could not save file to {file_name}, due to directory not being found")
except PermissionError:
error(f"Could not save file to {file_name}, due to insufficient permissions")
except RequestException:
error(f"Could not save file to {file_name}, due to a network error")
def get_file_properties(session, file_id, request_headers, error):
"""Get file name and size (in bytes)."""
response = make_get_request(
session=session,
url=get_api_file_url(file_id=file_id),
headers=request_headers,
error=error,
)
info = response.json()["Response"]
return info["Name"], info["Size"]
def make_get_request(session, url, headers, error, stream=False):
"""Make a get request."""
response = session.get(url=url, headers=headers, stream=stream, timeout=60)
if response.status_code == 401:
error(f"Authentication failed on URL {url}")
elif response.status_code == 404:
error(f"BaseSpace file {url} not found")
elif response.status_code != 200:
error(f"Failed to retrieve content from {url}")
return response
def get_api_file_url(file_id):
"""Get BaseSpace API file URL."""
api_url = "https://api.basespace.illumina.com/v1pre3"
return f"{api_url}/files/{file_id}"
def get_api_file_content_url(file_id):
"""Get BaseSpace API file contents URL."""
return f"{get_api_file_url(file_id=file_id)}/content"
def output(output_option, value):
"""Print to standard output."""
if output_option == "full":
print(value)
elif output_option == "filename":
if value.startswith("filename="):
print(value[len("filename=") :])
def get_token_from_secret_file(secret_file_path, error):
"""Read secret file to obtain access token."""
try:
with open(secret_file_path, "r") as f:
return f.readline()
except FileNotFoundError:
error("Secret file not found")
except PermissionError:
error("No permissions to read secret file")
def on_exit(session):
"""Clean up function called on exit."""
session.close()
def raise_for_file_corruption(file_name, expected_file_size, error):
"""Raise an error if file does not pass integrity check."""
# Check file size.
actual_file_size = os.path.getsize(file_name)
if expected_file_size != actual_file_size:
error(
f"File's ({file_name}) expected size ({expected_file_size}) "
f"does not match its actual size ({actual_file_size})"
)
# Check gzip integrity.
if file_name.split(".")[-1] == "gz":
try:
with gzip.open(file_name, "rb") as f:
chunk_size = 1024 * 1024 * 10
while bool(f.read(chunk_size)):
pass
except OSError:
error(f"File {file_name} did not pass gzip integrity check")
class BaseSpaceImport(Process):
"""Import a file from Illumina BaseSpace."""
slug = "basespace-file-import"
name = "BaseSpace file"
process_type = "data:file"
version = "1.4.0"
category = "Import"
data_name = 'BaseSpace ({{ file_id|default("?") }})'
persistence = Persistence.TEMP
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
},
"resources": {
"cores": 1,
"memory": 1024,
"network": True,
"secrets": True,
},
}
class Input:
"""Input fields to process BaseSpaceImport."""
file_id = StringField(label="BaseSpace file ID")
access_token_secret = SecretField(
label="BaseSpace access token",
description="BaseSpace access token secret handle needed to download the file.",
)
show_advanced = BooleanField(
label="Show advanced options",
default=False,
)
class Advanced:
"""Advanced options."""
output = StringField(
label="Output",
allow_custom_choice=False,
choices=[("full", "Full"), ("filename", "Filename")],
default="filename",
description="Sets what is printed to standard output. "
"Argument 'Full' outputs everything, "
"argument 'Filename' outputs only file names of downloaded files.",
)
tries = IntegerField(
label="Tries",
description="Number of tries to download a file before giving up.",
range=[1, 10],
default=3,
)
verbose = BooleanField(
label="Verbose",
default=False,
description="Print detailed exception information to standard output "
"when error occurs. Output argument had no effect on this argument.",
)
advanced = GroupField(
Advanced, label="Advanced options", hidden="!show_advanced"
)
class Output:
"""Output fields to process BaseSpaceImport."""
file = FileField(label="File with reads")
def run(self, inputs, outputs):
"""Run import."""
secret_path = Path("/secrets") / inputs.access_token_secret["handle"]
session = Session()
atexit.register(on_exit, session)
try:
file_id = inputs.file_id
access_token = get_token_from_secret_file(
secret_file_path=secret_path, error=self.error
)
headers = {"x-access-token": access_token}
file_name, file_size = get_file_properties(
session=session,
file_id=file_id,
request_headers=headers,
error=self.error,
)
download_file_repeatedly(
tries=inputs.advanced.tries,
session=session,
file_id=file_id,
file_name=file_name,
expected_file_size=file_size,
request_headers=headers,
error=self.error,
)
output(inputs.advanced.output, f"filename={file_name}")
except Exception as error:
if inputs.advanced.verbose:
traceback.print_exc()
self.error(
"Unexpected error occurred while trying to download files from BaseSpace. "
"Check standard output for more details."
)
else:
print(str(error))
self.error(
"Unexpected error occurred while trying to download files from BaseSpace. "
"Set Verbose to True to see the traceback."
)
outputs.file = file_name
| 31.376812 | 95 | 0.588337 | [
"Apache-2.0"
] | plojyon/resolwe-bio | resolwe_bio/processes/import_data/basespace.py | 8,660 | Python |
from kafka import KafkaProducer
from kafka import KafkaConsumer
from kafka import KafkaAdminClient
import json
from json import dumps
from json import loads
import time
import os
import requests
import sys
import GE_GSCH_low_define as lowDefine
'''
{'requestID': 'req-f6720a0e-e3df-455a-825d-f8c80cedc2d9',
'date': '2021-10-18 13:46:30', 'status': 'create',
'fileID': 'b469e54a-721f-4c55-b43e-d09088556031', 'failCnt': 0,
'env': {
'type': 'global',
'targetClusters': ['c1', ['c2', 'c3'], 'c4'],
'priority': 'GLowLatencyPriority',
'option': {
'sourceCluster': 'c1',
'sourceNode': 'a-worker-node01'
}
}
}
'''
class GLowLatencyPriority_Job:
def __init__(self,request_data_dic):
self.job_name = lowDefine.SELF_POLICY_NAME
self.requestDataDic = request_data_dic
self.requestID=request_data_dic['requestID']
self.fileID=request_data_dic['fileID']
self.failCnt=request_data_dic['failCnt']
self.env=request_data_dic['env']
self.targetClusters=self.env['targetClusters']
self.sourceCluster=self.env['option']['sourceCluster']
self.sourceNode=self.env['option']['sourceNode']
self.sharedClusters = self.get_shared_clusters()
self.producer= KafkaProducer(acks=0,
compression_type='gzip',
bootstrap_servers=[lowDefine.KAFKA_SERVER_URL],
value_serializer=lambda x: dumps(x).encode('utf-8'))
def get_shared_clusters(self):
for item in self.targetClusters :
            if type(item).__name__ == 'list' :
if len(item) > 1 :
return item
else :
return None
else :
print()
#apply low-latency yaml with
def check_res_fail(self, res):
if res == None:
return True
if 'hcode' not in res:
return True
if 'lcode' not in res:
return True
if 'msg' not in res:
return True
if 'result' not in res['msg']:
return True
return False
def request_clusters_latency_from_clusterAgent(self,clusters):
try :
temp_msg = {'source':{'type':'none'},
'target':{'type':'cluster', 'object':self.sourceCluster},
'hcode':200,
'lcode':1,
'msg':{'requestID': self.requestID,'sourceNode': self.sourceNode,'targetClusters': clusters }
}
self.producer.send(lowDefine.GLOBAL_SCHEDULER_GLOBAL_TOPIC_NAME,value=temp_msg)
self.producer.flush()
except:
return 'process_fail'
return 'process_success'
def wait_request_clusters_latency_from_clusterAgent(self):
ordered_cluster_list =[]
res = self.wait_consumer()
if res == None:
print('res is None')
return 'process_fail', ordered_cluster_list
is_process_fail = self.check_res_fail(res)
hcode = res['hcode']
lcode = res['lcode']
result = res['msg']['result']
'''
result: [ {cluster: c3, latency: 11 },
{cluster: c2, latency: 34 } ]
'''
if is_process_fail:
print('Fail Job:', res)
return 'process_fail', ordered_cluster_list
else:
if hcode == 200 and lcode == 2:
for item in result :
ordered_cluster_list.append(item['cluster'])
return 'process_success', ordered_cluster_list
else :
return 'process_fail', ordered_cluster_list
def apply_yaml_to_ClusterAgent(self,cluster):
print('apply_yaml_to_ClusterAgent:',cluster)
try :
temp_msg = {'source':{'type':'none'},
'target':{'type':'cluster', 'object':cluster},
'hcode':210,
'lcode':1,
'msg':{'requestID': self.requestID,'fileID':self.fileID,'requestData':self.requestDataDic }
}
self.producer.send(lowDefine.GLOBAL_SCHEDULER_GLOBAL_TOPIC_NAME,value=temp_msg)
self.producer.flush()
except:
return 'process_fail'
return 'process_success'
def wait_apply_yaml_to_ClusterAgent(self):
res = self.wait_consumer()
if res == None:
print('res is None')
return 'process_fail'
is_process_fail = self.check_res_fail(res)
hcode = res['hcode']
lcode = res['lcode']
result = res['msg']['result']
print('hcode :hcode,result',hcode,lcode,result)
if is_process_fail:
print('Fail Job:', res)
return 'process_fail'
else:
if hcode == 210 and lcode == 2:
if result == 'success' :
return 'apply_success'
elif result == 'fail' :
return 'apply_fail'
elif result == 'cancel' :
return 'cancel'
else :
return 'process_fail'
else:
return 'process_fail'
def wait_consumer(self):
print('wait_consumer')
consumer = KafkaConsumer(
self.requestID,
bootstrap_servers=[lowDefine.KAFKA_SERVER_URL],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id=self.requestID,
value_deserializer=lambda x: loads(x.decode('utf-8')),
consumer_timeout_ms=1000*10
)
print('w-1')
res = None
for message in consumer:
print("Topic: %s, Partition: %d, Offset: %d, Key: %s, Value: %s" % ( message.topic, message.partition, message.offset, message.key, message.value ))
res = message.value
break
consumer.close()
return res
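    # Message convention observed in this class: hcode 200 is the cluster-latency
    # query and hcode 210 the yaml-apply request; lcode 1 marks the request sent
    # to the cluster agent and lcode 2 its reply, whose msg.result carries either
    # the latency-ordered cluster list or one of success/fail/cancel.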
def start_job_processor():
print('start_job_processor')
while 1 :
#read dispatched queue
print('1')
try :
res = requests.get(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/policys/'+lowDefine.SELF_POLICY_NAME)
except:
print('wait front server to run',lowDefine.FRONT_SERVER_SERVER_URL)
time.sleep(5)
continue
if res.status_code == 200 :
print('2')
request_data_dic = json.loads(res.json())
print('request_data_dic',request_data_dic)
GE_Request_Job = GLowLatencyPriority_Job(request_data_dic)
print('3')
#send topic message
'''
return values
'apply_success' : apply is success
'process_success' :
'process_fail': raise error in process(apply or wait consumer, request latency)
'apply_fail' : apply is fail
'''
is_whole_process_status = None
for item in GE_Request_Job.targetClusters :
print('type(item)',type(item),item)
if type(item).__name__ == 'list' and len(item) > 1 :
r = GE_Request_Job.request_clusters_latency_from_clusterAgent(item)
if r == 'process_fail' :
print('internal error : request_clusters_latency_from_clusterAgent')
continue
r,clusters = GE_Request_Job.wait_request_clusters_latency_from_clusterAgent()
if r == 'process_fail' :
print('internal error : wait_request_clusters_latency_from_clusterAgent')
continue
for t_cluster in clusters:
r = GE_Request_Job.apply_yaml_to_ClusterAgent(t_cluster)
if r == 'process_fail' :
print('internal error : apply_yaml_to_ClusterAgent')
continue
r = GE_Request_Job.wait_apply_yaml_to_ClusterAgent()
if r == 'process_fail' :
print('internal error : wait_apply_yaml_to_ClusterAgent')
continue
elif r == 'apply_success' or r == 'cancel':
print('---pply_success or cancel',r)
is_whole_process_status = r
break
elif r == 'apply_fail' :
is_whole_process_status = r
continue
if r == 'apply_success' or r == 'cancel':
break
else :
r = GE_Request_Job.apply_yaml_to_ClusterAgent(item)
if r == 'process_fail' :
print('internal error : apply_yaml_to_ClusterAgent')
continue
r = GE_Request_Job.wait_apply_yaml_to_ClusterAgent()
if r == 'process_fail' :
print('internal error : wait_apply_yaml_to_ClusterAgent')
continue
elif r == 'apply_success' or r == 'cancel':
is_whole_process_status = r
print('apply_success or cancel:',r)
break
elif r == 'apply_fail':
is_whole_process_status = r
print('apply_fail')
continue
print('==============')
if is_whole_process_status == 'apply_fail' :
#GE_Request_Job.requestDataDic['status'] = 'failed'
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/failed')
elif is_whole_process_status == 'apply_success' :
#GE_Request_Job.requestDataDic['status'] = 'completed'
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/completed')
elif is_whole_process_status == 'cancel' :
#GE_Request_Job.requestDataDic['status'] = 'cancel'
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/canceled')
else :
#GE_Request_Job.requestDataDic['status'] = 'cancel'
requests.put(lowDefine.FRONT_SERVER_SERVER_URL+'/ge/sch/gm/fs/dispatched-queue/'+GE_Request_Job.requestID+'/status/canceled')
else:
print('despatched queue is empty')
time.sleep(5)
continue
#time.sleep(1)
if __name__ == '__main__':
start_job_processor()
| 40.507407 | 161 | 0.540642 | [
"Apache-2.0"
] | gedge-platform/GEdge-Platform | gs-scheduler/global_scheduler2/policy_dockerfile/lowlatency/GE_GSCH_low_latency.py | 10,937 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'ListTopicKeysResult',
'AwaitableListTopicKeysResult',
'list_topic_keys',
]
@pulumi.output_type
class ListTopicKeysResult:
"""
Namespace/ServiceBus Connection String
"""
def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None):
if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str):
raise TypeError("Expected argument 'alias_primary_connection_string' to be a str")
pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string)
if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str):
raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str")
pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string)
if key_name and not isinstance(key_name, str):
raise TypeError("Expected argument 'key_name' to be a str")
pulumi.set(__self__, "key_name", key_name)
if primary_connection_string and not isinstance(primary_connection_string, str):
raise TypeError("Expected argument 'primary_connection_string' to be a str")
pulumi.set(__self__, "primary_connection_string", primary_connection_string)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if secondary_connection_string and not isinstance(secondary_connection_string, str):
raise TypeError("Expected argument 'secondary_connection_string' to be a str")
pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="aliasPrimaryConnectionString")
def alias_primary_connection_string(self) -> str:
"""
Primary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_primary_connection_string")
@property
@pulumi.getter(name="aliasSecondaryConnectionString")
def alias_secondary_connection_string(self) -> str:
"""
Secondary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_secondary_connection_string")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
A string that describes the authorization rule.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="primaryConnectionString")
def primary_connection_string(self) -> str:
"""
Primary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "primary_connection_string")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="secondaryConnectionString")
def secondary_connection_string(self) -> str:
"""
Secondary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "secondary_connection_string")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "secondary_key")
class AwaitableListTopicKeysResult(ListTopicKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListTopicKeysResult(
alias_primary_connection_string=self.alias_primary_connection_string,
alias_secondary_connection_string=self.alias_secondary_connection_string,
key_name=self.key_name,
primary_connection_string=self.primary_connection_string,
primary_key=self.primary_key,
secondary_connection_string=self.secondary_connection_string,
secondary_key=self.secondary_key)
def list_topic_keys(authorization_rule_name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
topic_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListTopicKeysResult:
"""
Namespace/ServiceBus Connection String
API Version: 2017-04-01.
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
:param str topic_name: The topic name.
"""
__args__ = dict()
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
__args__['topicName'] = topic_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus:listTopicKeys', __args__, opts=opts, typ=ListTopicKeysResult).value
return AwaitableListTopicKeysResult(
alias_primary_connection_string=__ret__.alias_primary_connection_string,
alias_secondary_connection_string=__ret__.alias_secondary_connection_string,
key_name=__ret__.key_name,
primary_connection_string=__ret__.primary_connection_string,
primary_key=__ret__.primary_key,
secondary_connection_string=__ret__.secondary_connection_string,
secondary_key=__ret__.secondary_key)
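# Minimal usage sketch (resource names below are placeholders):
#
#     keys = list_topic_keys(
#         authorization_rule_name="RootManageSharedAccessKey",
#         namespace_name="my-namespace",
#         resource_group_name="my-resource-group",
#         topic_name="my-topic")
#     pulumi.export("primaryConnectionString", keys.primary_connection_string)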
| 43.629139 | 224 | 0.713418 | [
"Apache-2.0"
] | pulumi/pulumi-azure-nextgen | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | 6,588 | Python |
"""
Base backend
Trace and Database classes from the other modules should Subclass the base
classes.
"""
import PyMC2
class Trace(object):
"""Dummy Trace class.
"""
def __init__(self,value=None, obj=None):
"""Assign an initial value and an internal PyMC object."""
self._trace = value
if obj is not None:
if isinstance(obj, PyMC2.PyMCBase):
self._obj = obj
else:
raise AttributeError, 'Not PyMC object', obj
def _initialize(self, length):
"""Dummy method. Subclass if necessary."""
pass
def tally(self, index):
"""Dummy method. Subclass if necessary."""
pass
def truncate(self, index):
"""Dummy method. Subclass if necessary."""
pass
def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
"""Dummy method. Subclass if necessary.
Input:
- burn (int): The number of transient steps to skip.
- thin (int): Keep one in thin.
- chain (int): The index of the chain to fetch. If None, return all chains.
- slicing: A slice, overriding burn and thin assignement.
"""
raise AttributeError, self._obj.__name__ + " has no trace"
__call__ = gettrace
## def obj():
## def fset(self, obj):
## if isinstance(obj, PyMC2.PyMCBase):
## self.__obj = obj
## else:
## raise AttributeError, 'Not PyMC object'
## def fget(self):
## return self.__obj
## return locals()
## obj = property(**obj())
def _finalize(self):
pass
class Database(object):
"""Dummy Database backend"""
def __init__(self):
"""Get the Trace from the local scope."""
self.Trace = Trace
def _initialize(self, length):
"""Tell the traces to initialize themselves."""
for o in self.model._pymc_objects_to_tally:
o.trace._initialize(length)
def tally(self, index):
"""Dummy method. Subclass if necessary."""
for o in self.model._pymc_objects_to_tally:
o.trace.tally(index)
def connect(self, sampler):
"""Link the Database to the Sampler instance.
If database is loaded from a file, restore the objects trace
to their stored value, if a new database is created, instantiate
a Trace for the PyMC objects to tally.
"""
if isinstance(sampler, PyMC2.Sampler):
self.model = sampler
else:
raise AttributeError, 'Not a Sampler instance.'
if hasattr(self, '_state_'):
# Restore the state of the Sampler.
for o in sampler._pymc_objects_to_tally:
o.trace = getattr(self, o.__name__)
o.trace._obj = o
else:
# Set a fresh new state
for o in sampler._pymc_objects_to_tally:
o.trace = self.Trace(obj=o)
for o in sampler._pymc_objects_to_tally:
o.trace.db = self
def _finalize(self):
"""Tell the traces to finalize themselves."""
for o in self.model._pymc_objects_to_tally:
o.trace._finalize()
def close(self):
"""Close the database."""
pass
def savestate(self, state):
"""Store a dictionnary containing the state of the Sampler and its
SamplingMethods."""
self._state_ = state
def getstate(self):
"""Return a dictionary containing the state of the Sampler and its
SamplingMethods."""
return self._state_
| 31.008264 | 86 | 0.559701 | [
"Apache-2.0"
] | rsumner31/pymc3-23 | PyMC2/database/base.py | 3,752 | Python |
from utils import TreeNode, binary_tree
class Solution:
def __init__(self):
        self.index = 0  # Using the fact that [number of inorder elements to the left = total nodes in the left subtree] would make this counter field unnecessary
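    # Walk-through with the second case under __main__: preorder [3, 9, 20, 15, 7],
    # inorder [9, 3, 15, 20, 7].  preorder[0] = 3 is the root and sits at inorder
    # index 1, so [9] forms the left subtree and [15, 20, 7] the right one; the
    # same split is applied recursively from the advancing preorder index.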
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
if not preorder:
return None
def build_node(lo, hi):
node = TreeNode(preorder[self.index])
self.index += 1
            j = inorder.index(node.val, lo, hi)  # Some solutions build a value-to-index dict to speed this step up, but that increases space complexity
if self.index < len(preorder) and preorder[self.index] in inorder[lo:j]:
node.left = build_node(lo, j)
if self.index < len(preorder) and preorder[self.index] in inorder[j + 1:hi]:
node.right = build_node(j + 1, hi)
return node
return build_node(0, len(preorder))
if __name__ == '__main__':
x = Solution().buildTree([1, 2, 4, 6, 5, 7, 8, 3, 9], [4, 6, 2, 7, 5, 8, 1, 9, 3])
x = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
| 26 | 83 | 0.642857 | [
"MIT"
] | Kaciras/leetcode | medium/Q105_ConstructBinaryTreeFromPreorderAndInorderTraversal.py | 1,016 | Python |
#
# Copyright (c) 2019, 2021 by Delphix. All rights reserved.
#
import dlpx.virtualization.api
from dlpx.virtualization.common.util import to_str
def get_virtualization_api_version():
"""Returns the Virutalization API version string.
:return: version string
"""
return to_str(dlpx.virtualization.api.__version__)
| 23.714286 | 59 | 0.756024 | [
"Apache-2.0"
] | Balamuruhan/virtualization-sdk | platform/src/main/python/dlpx/virtualization/platform/util.py | 332 | Python |
# -*- coding: utf-8 -*-
from ..base import Property
from .array import StateVector
from .base import Type
class Particle(Type):
"""
Particle type
A particle type which contains a state and weight
"""
state_vector: StateVector = Property(doc="State vector")
weight: float = Property(doc='Weight of particle')
parent: 'Particle' = Property(default=None, doc='Parent particle')
def __init__(self, state_vector, weight, parent=None, *args, **kwargs):
if parent:
parent.parent = None
if state_vector is not None and not isinstance(state_vector, StateVector):
state_vector = StateVector(state_vector)
super().__init__(state_vector, weight, parent, *args, **kwargs)
@property
def ndim(self):
return self.state_vector.shape[0]
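# Minimal construction sketch (values are arbitrary):
#
#     p = Particle(StateVector([[0.], [1.]]), weight=0.1)
#     p.ndim  # -> 2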
| 29.321429 | 82 | 0.666261 | [
"MIT"
] | 0sm1um/Stone-Soup | stonesoup/types/particle.py | 821 | Python |
from heapq import heappush, nsmallest
import numpy as np
class NearestNeighbor():
def __init__(self, embeddings, encodings, config):
self.embeddings = embeddings
self.encodings = encodings
self.config = config
def euclidian_distance(self, e1, e2):
'''
https://stackoverflow.com/questions/1401712/how-can-the-euclidean-distance-be-calculated-with-numpy
'''
return np.linalg.norm(e1 - e2)
def get_embedding(self, word):
if self.encodings.word_in_vocab(word):
return self.embeddings[word]
        return self.embeddings[self.config.unknown_word]
def nearest_neighbors(self, word, count=1):
embedding = self.get_embedding(word)
heap = []
# TODO: is it faster to not have the the string comparision and instead always
# remove the first element of the array which will have a distance of 0
# TODO: implement faster solution than the heap where it only keeps track of K
# values which should vastly reduce the number of operations required.
for w in self.embeddings:
if w == word:
continue
dist = self.euclidian_distance(embedding, self.embeddings[w])
heappush(heap, (dist, w))
return nsmallest(count, heap)
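# Rough usage sketch (the embeddings/encodings/config objects and the word are
# placeholders):
#
#     nn = NearestNeighbor(embeddings, encodings, config)
#     nn.nearest_neighbors('king', count=5)  # -> [(distance, word), ...]
#
# i.e. the `count` vocabulary words closest to 'king' by Euclidean distance,
# smallest distance first.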
| 34.868421 | 107 | 0.644528 | [
"MIT"
] | bi3mer/Word2Vec | Word2Vec/NearestNeighbor.py | 1,325 | Python |
# Copyright (c) 2013, igrekus and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from dc_plc.custom.utils import add_completeness, add_query_relevance
from dc_plc.controllers.stats_query import get_procmap_stats
def execute(filters=None):
columns = get_columns()
data = get_data(filters)
return columns, data
def get_columns():
return [
"ID:Link/DC_PLC_Product_Summary",
_("Relevance"),
_("Progress"),
_("RnD Title"),
_("Function"),
_("External number"),
_("Process map"),
_("Internal number")
]
def get_data(filters):
res = get_procmap_stats(filters)
has_perms = 'DC_PLC_Process_Map_Specialist' in frappe.get_roles(frappe.session.user)
res = [add_completeness(row, [4]) for row in res]
res = [add_query_relevance(row, has_perms) for row in res]
return res
| 21.609756 | 85 | 0.749436 | [
"MIT"
] | igrekus/dc_plc | dc_plc/dc_plc/report/dc_product_procmap_stats/dc_product_procmap_stats.py | 886 | Python |
a = 0
d = 0
g = 0
while True:
    numero = int(input(""))
    if (numero == 4):
        break
    if (numero == 1):
        a = a + 1
    elif (numero == 2):
        d = d + 1
    elif (numero == 3):
        g = g + 1
    elif (numero == 4):
        break
print(f"MUITO OBRIGADO \n Alcohol: {a} \n Gasolina: {g} \n Diesel: {d}")
"MIT"
] | Davidpadilla1234/Taller-de-Estrucuras-de-Control-Repeticion | Taller-de-Estrucuras-de-Control-Repeticion/ejercicio 9.py | 416 | Python |
# module for distance computation;
import numpy as np
def dist(arraya, arrayb, mode):
    # mode 0: Manhattan (L1) distance
    if mode == 0:
        dis = np.sum(np.abs(np.subtract(arraya, arrayb)))
    # mode 1: Euclidean (L2) distance
    elif mode == 1:
        dis = np.sqrt(np.sum(np.power(np.subtract(arraya, arrayb), 2)))
    # any other mode: cosine distance (1 - cosine similarity)
    else:
        dis = 1 - np.dot(arraya, arrayb) / np.sqrt(np.sum(np.power(arraya, 2)) * np.sum(np.power(arrayb, 2)))
    return dis
def corr(arraya, arrayb, show):
a = np.subtract(arraya, np.mean(arraya))
b = np.subtract(arrayb, np.mean(arrayb))
corr = np.sum(np.multiply(a, b)) / np.sqrt(np.multiply(np.sum(np.power(a, 2)), np.sum(np.power(b, 2))))
return corr | 35.444444 | 109 | 0.619122 | [
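# Quick demonstration of the three modes (0 = Manhattan, 1 = Euclidean,
# 2 = cosine distance) and of corr; illustrative values only.
if __name__ == "__main__":
    x = np.array([1.0, 2.0, 3.0])
    y = np.array([2.0, 2.0, 1.0])
    print(dist(x, y, 0))  # 3.0
    print(dist(x, y, 1))  # ~2.236
    print(dist(x, y, 2))  # ~0.198
    print(corr(x, y, show=False))  # ~-0.866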
"MIT"
] | Lan-Jing/Courses | DCS311 Artificial Intelligence/KNN/lab1_code/M3/dist.py | 638 | Python |
"""Run CVEjob."""
import sys
from decimal import Decimal
import multiprocessing
import nvdlib
from nvdlib.manager import FeedManager
from nvdlib.query_selectors import in_range
from cvejob.filters.input import validate_cve
from cvejob.config import Config
from cvejob.identifiers import get_identifier_cls
from cvejob.cpe2pkg import get_pkgfile_path, PackageNameCandidate
from cvejob.selectors.basic import VersionSelector
from cvejob.outputs.victims import VictimsYamlOutput
from cvejob.versions import NVDVersions
from cvejob.utils import parse_date_range
import logging
# logging configuration
logging.basicConfig(level=logging.DEBUG,
handlers=[nvdlib.get_logging_handler()]) # use nvdlib's handler
logger = logging.getLogger('cvejob')
FEED_NAME_PATTERN = r"nvdcve-" \
r"(?P<version>[\d.]+)-" \
r"(?P<name>(?P<name_string>(([A-Za-z]+)))|(?P<name_year>([\d]+)))" \
r".json"
def _log_results(victims_output):
"""Log results."""
cve_id = victims_output.cve.id_
logger.info(
"[{cve_id}] picked `{winner}` out of `{candidates}`".format(
cve_id=cve_id,
winner=victims_output.winner,
candidates=victims_output.candidates
))
logger.info(
"[{cve_id}] Affected version range: {version_ranges}".format(
cve_id=cve_id,
version_ranges=victims_output.affected_versions
))
logger.info(
"[{cve_id}] Safe version range: {version_ranges}".format(
cve_id=cve_id,
version_ranges=victims_output.safe_versions
))
def _filter_collection(collection, date_range, cherry_pick):
"""Filter Document collection."""
if date_range:
collection_size_before = collection.count()
collection = collection.find(
{'published_date': in_range(*date_range)}
)
logger.debug(("Filtered out {} Documents that do not fall "
"in the given range.").format(
collection_size_before - collection.count()
))
if cherry_pick:
logger.debug("Cherry-picked CVE `{cve_id}`".format(
cve_id=cherry_pick
))
collection = collection.find(
{'cve.id_': cherry_pick}
)
return collection
def run():
"""Run CVEjob."""
feed_dir = Config.feed_dir
feed_names = Config.feed_names
date_range = Config.date_range
cherrypicked_cve_id = Config.cve_id
cherrypicked_year = None
if cherrypicked_cve_id:
cherrypicked_year = cherrypicked_cve_id.split(sep='-')[1]
if int(cherrypicked_year) < 2002:
# all CVEs prior to 2002 are stored in 2002 feed
cherrypicked_year = 2002
if date_range:
date_range = parse_date_range(Config.date_range)
feed_names = range(date_range[0].year, date_range[1].year + 1)
if cherrypicked_cve_id: # optimization check
if int(cherrypicked_year) not in feed_names:
logger.info(
"[{picked_cve_id}] does not belong to the given feed range:"
" {date_range}".format(
picked_cve_id=cherrypicked_cve_id,
date_range=date_range
))
return
# prune the feed names as it is not necessary to iterate over all of them
feed_names = [cherrypicked_year]
if not feed_names:
if cherrypicked_cve_id:
feed_names = [cherrypicked_year]
else:
feed_names = ['modified']
with FeedManager(n_workers=multiprocessing.cpu_count()) as feed_manager:
feeds = feed_manager.fetch_feeds(
feed_names=feed_names, data_dir=feed_dir, update=True
)
collection = feed_manager.collect(feeds)
collection = _filter_collection(collection,
date_range,
cherrypicked_cve_id)
if not collection: # collection is empty
logger.info(
"Collection is empty.".format(
picked_cve_id=cherrypicked_cve_id,
))
return
logger.debug("Number of CVE Documents in the collection: {}".format(
collection.count()
))
if Config.package_name and Config.cve_id:
# user knows the package name, so we don't have to guess ;)
doc = [x for x in collection][0] # Collection doesn't support indexing
affected, safe = NVDVersions(doc, Config.package_name, Config.ecosystem).run()
victims_output = VictimsYamlOutput(
ecosystem=Config.ecosystem,
cve_doc=doc,
winner=PackageNameCandidate(Config.package_name, Decimal('1.0')),
candidates=[],
affected=affected,
fixedin=safe
)
_log_results(victims_output)
victims_output.write()
sys.exit(0)
for doc in collection:
cve_id = doc.cve.id_
try:
if not validate_cve(doc):
logger.debug(
"[{cve_id}] was filtered out by input checks".format(
cve_id=cve_id
))
continue
pkgfile_path = get_pkgfile_path(Config.pkgfile_dir, Config.ecosystem)
identifier = get_identifier_cls()(doc, Config.ecosystem, pkgfile_path)
candidates = identifier.identify()
if not candidates:
logger.info(
"[{cve_id}] no package name candidates found".format(
cve_id=cve_id
))
continue
selector = VersionSelector(doc, candidates, Config.ecosystem)
winner = selector.pick_winner()
if not winner:
logger.info(
"[{cve_id}] no package name found".format(
cve_id=cve_id
))
continue
affected, safe = NVDVersions(doc, winner.package, Config.ecosystem).run()
victims_output = VictimsYamlOutput(
ecosystem=Config.ecosystem,
cve_doc=doc,
winner=winner,
candidates=candidates,
affected=affected,
fixedin=safe
)
_log_results(victims_output)
victims_output.write()
except Exception as exc:
logger.warning(
"[{cve_id}] Unexpected exception occurred: {exc}".format(
cve_id=cve_id,
exc=exc
), exc_info=True)
if __name__ == '__main__':
run()
| 29.666667 | 88 | 0.582496 | [
"Apache-2.0"
] | jparsai/cvejob | run.py | 6,764 | Python |
# This file is generated by C:\projects\numpy-wheels\numpy\setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
blas_opt_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('ATLAS_INFO', '"\\"None\\""')], 'libraries': ['numpy-atlas']}
lapack_mkl_info={}
lapack_opt_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'f77', 'libraries': ['numpy-atlas', 'numpy-atlas'], 'define_macros': [('ATLAS_INFO', '"\\"None\\""')]}
atlas_3_10_blas_threads_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('ATLAS_INFO', '"\\"None\\""')], 'libraries': ['numpy-atlas']}
atlas_3_10_threads_info={'library_dirs': ['C:\\projects\\numpy-wheels\\windows-wheel-builder\\atlas-builds\\atlas-3.11.38-sse2-64\\lib'], 'language': 'f77', 'libraries': ['numpy-atlas', 'numpy-atlas'], 'define_macros': [('ATLAS_INFO', '"\\"None\\""')]}
openblas_info={}
blas_mkl_info={}
openblas_lapack_info={}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
| 62 | 263 | 0.609566 | [
"CC0-1.0"
] | CSnap/photogate | python-3.4.4.amd64/Lib/site-packages/numpy/__config__.py | 1,798 | Python |
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import functools
import logging
from os import path
import boto3
import jsonschema
from c7n_mailer import deploy, utils
from c7n_mailer.azure_mailer.azure_queue_processor import MailerAzureQueueProcessor
from c7n_mailer.azure_mailer import deploy as azure_deploy
from c7n_mailer.sqs_queue_processor import MailerSqsQueueProcessor
from c7n_mailer.utils import get_provider, Providers
from ruamel import yaml
AZURE_KV_SECRET_SCHEMA = {
'type': 'object',
'properties': {
'type': {'enum': ['azure.keyvault']},
'secret': {'type': 'string'}
},
'required': ['type', 'secret'],
'additionalProperties': False
}
SECURED_STRING_SCHEMA = {
'oneOf': [
{'type': 'string'},
AZURE_KV_SECRET_SCHEMA
]
}
CONFIG_SCHEMA = {
'type': 'object',
'additionalProperties': False,
'required': ['queue_url'],
'properties': {
'queue_url': {'type': 'string'},
'from_address': {'type': 'string'},
'contact_tags': {'type': 'array', 'items': {'type': 'string'}},
'org_domain': {'type': 'string'},
# Standard Lambda Function Config
'region': {'type': 'string'},
'role': {'type': 'string'},
'runtime': {'type': 'string'},
'memory': {'type': 'integer'},
'timeout': {'type': 'integer'},
'subnets': {'type': 'array', 'items': {'type': 'string'}},
'security_groups': {'type': 'array', 'items': {'type': 'string'}},
'dead_letter_config': {'type': 'object'},
'lambda_name': {'type': 'string'},
'lambda_description': {'type': 'string'},
'lambda_tags': {'type': 'object'},
'lambda_schedule': {'type': 'string'},
# Azure Function Config
'function_properties': {
'type': 'object',
'appInsights': {
'type': 'object',
'oneOf': [
{'type': 'string'},
{'type': 'object',
'properties': {
'name': 'string',
'location': 'string',
'resourceGroupName': 'string'}
}
]
},
'storageAccount': {
'type': 'object',
'oneOf': [
{'type': 'string'},
{'type': 'object',
'properties': {
'name': 'string',
'location': 'string',
'resourceGroupName': 'string'}
}
]
},
'servicePlan': {
'type': 'object',
'oneOf': [
{'type': 'string'},
{'type': 'object',
'properties': {
'name': 'string',
'location': 'string',
'resourceGroupName': 'string',
'skuTier': 'string',
'skuName': 'string'}
}
]
},
},
'function_schedule': {'type': 'string'},
'function_skuCode': {'type': 'string'},
'function_sku': {'type': 'string'},
# Mailer Infrastructure Config
'cache_engine': {'type': 'string'},
'smtp_server': {'type': 'string'},
'smtp_port': {'type': 'integer'},
'smtp_ssl': {'type': 'boolean'},
'smtp_username': {'type': 'string'},
'smtp_password': SECURED_STRING_SCHEMA,
'ldap_email_key': {'type': 'string'},
'ldap_uid_tags': {'type': 'array', 'items': {'type': 'string'}},
'debug': {'type': 'boolean'},
'ldap_uid_regex': {'type': 'string'},
'ldap_uri': {'type': 'string'},
'ldap_bind_dn': {'type': 'string'},
'ldap_bind_user': {'type': 'string'},
'ldap_uid_attribute': {'type': 'string'},
'ldap_manager_attribute': {'type': 'string'},
'ldap_email_attribute': {'type': 'string'},
'ldap_bind_password_in_kms': {'type': 'boolean'},
'ldap_bind_password': {'type': 'string'},
'cross_accounts': {'type': 'object'},
'ses_region': {'type': 'string'},
'redis_host': {'type': 'string'},
'redis_port': {'type': 'integer'},
'datadog_api_key': {'type': 'string'}, # TODO: encrypt with KMS?
'datadog_application_key': {'type': 'string'}, # TODO: encrypt with KMS?
'slack_token': {'type': 'string'},
'slack_webhook': {'type': 'string'},
'sendgrid_api_key': SECURED_STRING_SCHEMA,
'splunk_hec_url': {'type': 'string'},
'splunk_hec_token': {'type': 'string'},
'splunk_remove_paths': {
'type': 'array',
'items': {'type': 'string'}
},
'splunk_actions_list': {'type': 'boolean'},
'splunk_max_attempts': {'type': 'integer'},
'splunk_hec_max_length': {'type': 'integer'},
# SDK Config
'profile': {'type': 'string'},
'http_proxy': {'type': 'string'},
'https_proxy': {'type': 'string'},
# Mapping account / emails
'account_emails': {'type': 'object'}
}
}
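# Illustrative minimal mailer.yml that satisfies CONFIG_SCHEMA above; all values
# are placeholders and only `queue_url` is required by the schema:
#
#   queue_url: https://sqs.us-east-1.amazonaws.com/123456789012/custodian-mailer
#   region: us-east-1
#   role: arn:aws:iam::123456789012:role/custodian-mailer
#   from_address: [email protected]
#   contact_tags:
#     - OwnerContact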
def session_factory(mailer_config):
return boto3.Session(
region_name=mailer_config['region'],
profile_name=mailer_config.get('profile', None))
def get_logger(debug=False):
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)
logging.getLogger('botocore').setLevel(logging.WARNING)
if debug:
logging.getLogger('botocore').setLevel(logging.DEBUG)
debug_logger = logging.getLogger('custodian-mailer')
debug_logger.setLevel(logging.DEBUG)
return debug_logger
else:
return logging.getLogger('custodian-mailer')
def get_and_validate_mailer_config(args):
with open(args.config) as fh:
config = yaml.load(fh.read(), Loader=yaml.SafeLoader)
jsonschema.validate(config, CONFIG_SCHEMA)
utils.setup_defaults(config)
return config
def get_c7n_mailer_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', required=True, help='mailer.yml config file')
debug_help_msg = 'sets c7n_mailer logger to debug, for maximum output (the default is INFO)'
parser.add_argument('--debug', action='store_true', help=debug_help_msg)
max_num_processes_help_msg = 'will run the mailer in parallel, integer of max processes allowed'
parser.add_argument('--max-num-processes', type=int, help=max_num_processes_help_msg)
templates_folder_help_msg = 'message templates folder location'
parser.add_argument('-t', '--templates', help=templates_folder_help_msg)
group = parser.add_mutually_exclusive_group(required=True)
update_lambda_help_msg = 'packages your c7n_mailer, uploads the zip to aws lambda as a function'
group.add_argument('--update-lambda', action='store_true', help=update_lambda_help_msg)
run_help_msg = 'run c7n-mailer locally, process sqs messages and send emails or sns messages'
group.add_argument('--run', action='store_true', help=run_help_msg)
return parser
def run_mailer_in_parallel(processor, max_num_processes):
max_num_processes = int(max_num_processes)
if max_num_processes < 1:
raise Exception
processor.max_num_processes = max_num_processes
processor.run(parallel=True)
def main():
parser = get_c7n_mailer_parser()
args = parser.parse_args()
mailer_config = get_and_validate_mailer_config(args)
args_dict = vars(args)
logger = get_logger(debug=args_dict.get('debug', False))
module_dir = path.dirname(path.abspath(__file__))
default_templates = [path.abspath(path.join(module_dir, 'msg-templates')),
path.abspath(path.join(module_dir, '..', 'msg-templates')),
path.abspath('.')]
templates = args_dict.get('templates', None)
if templates:
default_templates.append(path.abspath(path.expanduser(path.expandvars(templates))))
mailer_config['templates_folders'] = default_templates
provider = get_provider(mailer_config)
if args_dict.get('update_lambda'):
if args_dict.get('debug'):
print('\n** --debug is only supported with --run, not --update-lambda **\n')
return
if args_dict.get('max_num_processes'):
print('\n** --max-num-processes is only supported '
'with --run, not --update-lambda **\n')
return
if provider == Providers.Azure:
azure_deploy.provision(mailer_config)
elif provider == Providers.AWS:
deploy.provision(mailer_config, functools.partial(session_factory, mailer_config))
if args_dict.get('run'):
max_num_processes = args_dict.get('max_num_processes')
# Select correct processor
if provider == Providers.Azure:
processor = MailerAzureQueueProcessor(mailer_config, logger)
elif provider == Providers.AWS:
aws_session = session_factory(mailer_config)
processor = MailerSqsQueueProcessor(mailer_config, aws_session, logger)
# Execute
if max_num_processes:
run_mailer_in_parallel(processor, max_num_processes)
else:
processor.run()
if __name__ == '__main__':
main()
| 37.527344 | 100 | 0.575518 | [
"Apache-2.0"
] | CU-CommunityApps/cloud-custodian | tools/c7n_mailer/c7n_mailer/cli.py | 9,607 | Python |
from sklearn.exceptions import NotFittedError
class MockFunction:
"""
Mock utility function for testing.
"""
def __init__(self, return_val):
self.return_val = return_val
def __call__(self, *args):
return self.return_val
class MockEstimator:
"""
Mock classifier object for testing.
"""
def __init__(
self, predict_proba_return=None, predict_return=None, score_return=None,
classes_=None, fitted=True
):
self.fitted = fitted
if fitted:
self.classes_ = classes_
self.predict_return = predict_return
self.predict_proba_return = predict_proba_return
self.score_return = score_return
def fit(self, *args, **kwargs):
pass
def predict(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_return
def predict_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_proba_return
def score(self, *args, **kwargs):
return self.score_return
class MockActiveLearner:
"""
Mock ActiveLearner for testing.
"""
def __init__(
self, predictor=None, query_strategy=None,
predict_proba_return=None, calculate_utility_return=None, predict_return=None, score_return=None,
_X_initial=None, _y_initial=None
):
self.estimator = predictor
self.query_strategy = query_strategy
self.predict_proba_return = predict_proba_return
self.calculate_utility_return = calculate_utility_return
self.predict_return = predict_return
self.score_return = score_return
def fit(self, *args, **kwargs):
pass
def predict(self, *args, **kwargs):
return self.predict_return
def predict_proba(self, *args, **kwargs):
return self.predict_proba_return
def score(self, *args, **kwargs):
return self.score_return
class MockCommittee:
"""
Mock Committee for testing.
"""
def __init__(
self, n_learners=1, classes_=None, fitted=True,
calculate_disagreement_return=None,
predict_return=None, predict_proba_return=None,
vote_return=None, vote_proba_return=None
):
self.fitted = fitted
self.n_learners = n_learners
if fitted:
self.classes_ = classes_
else:
self.classes_ = None
self.calculate_disagreement_return = calculate_disagreement_return
self.predict_return = predict_return
self.predict_proba_return = predict_proba_return
self.vote_return = vote_return
self.vote_proba_return = vote_proba_return
def __len__(self):
return self.n_learners
def __iter__(self):
for x in range(self.n_learners):
yield x
def _calculate_disagreement(self, *args, **kwargs):
return self.calculate_disagreement_return
def predict(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_return
def predict_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.predict_proba_return
def vote(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.vote_return
def vote_proba(self, *args, **kwargs):
if not self.fitted:
raise NotFittedError
return self.vote_proba_return
| 25.963768 | 109 | 0.640246 | [
"MIT"
] | AlexandreAbraham/modAL | tests/mock.py | 3,583 | Python |
class InkCanvasEditingMode(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the editing mode for the System.Windows.Controls.InkCanvas
enum InkCanvasEditingMode,values: EraseByPoint (5),EraseByStroke (6),GestureOnly (2),Ink (1),InkAndGesture (3),None (0),Select (4)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
EraseByPoint=None
EraseByStroke=None
GestureOnly=None
Ink=None
InkAndGesture=None
None=None
Select=None
value__=None
| 27.902439 | 215 | 0.682692 | [
"MIT"
] | BCSharp/ironpython-stubs | release/stubs.min/System/Windows/Controls/__init___parts/InkCanvasEditingMode.py | 1,144 | Python |
#!/usr/bin/env python
from itertools import izip
import xmlrpclib
import rospy
from rospy.rostime import Time, Duration
from flexbe_core import EventState as Dummy
from flexbe_core import Logger
from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached, ProxyActionClient
from sensor_msgs.msg import JointState
from sweetie_bot_control_msgs.msg import SetOperationalAction, SetOperationalGoal, SetOperationalResult
# This is helper class so trick FlexBe App to ignore it.
# Dummy is actually EventState but FlexBe App is not able to recognize it.
class SetJointStateBase(Dummy):
'''
    Base class for states which move the robot to a named pose using a FollowJointState controller.
    The pose is loaded from a binary parameter on the Parameter Server as a JointState message.
    The state then activates the FollowJointState controller and publishes the pose.
    Movement is considered finished when the position error is less than the given tolerance.
-- controller string FollowJointState controller namespace.
-- tolerance float Position tolerance (rad).
-- timeout float Movement timeout (s).
-- joint_topic string Topic where actual pose published.
<= done Finished.
<= failed Failed to activate FollowJointState controller.
<= timeout Timeout reached.
'''
def __init__(self, controller = 'motion/controller/joint_state_head', tolerance = 0.17, timeout = 10.0,
joint_topic = "joint_states", outcomes = ['done', 'failed', 'timeout']):
super(SetJointStateBase, self).__init__(outcomes = outcomes)
# Store topic parameter for later use.
self._controller = controller
self._joint_topic = joint_topic
self._tolerance = tolerance
self._timeout = Duration.from_sec(timeout)
# create proxies
self._action_client = ProxyActionClient({self._controller: SetOperationalAction})
self._pose_publisher = ProxyPublisher({ self._controller + '/in_joints_ref': JointState })
self._pose_subscriber = ProxySubscriberCached({ self._joint_topic: JointState })
# timestamp
self._timestamp = None
# error in enter hook
self._error = False
def load_joint_state_msg(self, pose_ns, pose_param):
# derive parameter full name
if pose_ns:
pose_param = pose_ns + '/' + pose_param
# Load JointState message from Parameter Server
try:
goal_raw = rospy.get_param(pose_param)
except KeyError as e:
raise KeyError, "SetJointStateBase: Unable to get '" + pose_param + "' parameter."
if not isinstance(goal_raw, xmlrpclib.Binary):
raise TypeError, "SetJointStateBase: ROS parameter '" + pose_param + "' is not a binary data."
# deserialize
self._target_joint_state = JointState()
self._target_joint_state.deserialize(goal_raw.data)
# create joint index to simplify tolerance check
self._joint_target_pose = { name: position for name, position in izip(self._target_joint_state.name, self._target_joint_state.position) }
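    # A minimal sketch of how such a binary parameter could be produced up front
    # (names are illustrative; this state itself only reads the parameter):
    #
    #   from StringIO import StringIO
    #   buf = StringIO()
    #   joint_state_msg.serialize(buf)
    #   rospy.set_param('saved_msgs/head_nominal_pose', xmlrpclib.Binary(buf.getvalue()))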
def on_enter(self, userdata):
self._error = False
# activate controller
actiavtion_request = SetOperationalGoal()
actiavtion_request.operational = True
actiavtion_request.resources = self._target_joint_state.name
try:
self._action_client.send_goal(self._controller, actiavtion_request)
except Exception as e:
Logger.logwarn('SetJointStateBase: Failed to send the SetOperational command:\n%s' % str(e))
self._error = True
return
# set start timestamp
self._timestamp = Time.now()
def execute(self, userdata):
# error in start hook
if self._error:
return 'failed'
# check if controller is active
if not self._action_client.is_active(self._controller):
Logger.loginfo('SetJointStateBase: controller was deactivated by external cause.')
return 'failed';
        # check if the timeout has elapsed
        if Time.now() - self._timestamp > self._timeout:
            Logger.loginfo('SetJointStateBase: timeout reached.')
return 'timeout'
# publish goal pose
self._pose_publisher.publish(self._controller+'/in_joints_ref', self._target_joint_state)
# check tolerance
joints_msg = self._pose_subscriber.get_last_msg(self._joint_topic)
on_position = True
for name, pos in izip(joints_msg.name, joints_msg.position):
target_pos = self._joint_target_pose.get(name)
if (target_pos != None):
if abs(target_pos - pos) > self._tolerance:
on_position = False
break
if on_position:
Logger.loginfo('SetJointStateBase: on position')
return 'done'
def on_exit(self, userdata):
if self._action_client.is_active(self._controller):
try:
self._action_client.cancel(self._controller)
except Exception as e:
Logger.logwarn('SetJointStateBase: failed to deactivate `' + self._controller + '` controller:\n%s' % str(e))
| 42.273438 | 145 | 0.656441 | [
"BSD-3-Clause"
] | sweetie-bot-project/sweetie_bot_flexbe_behaviors | sweetie_bot_flexbe_states/src/sweetie_bot_flexbe_states/internal/set_joint_state_base.py | 5,411 | Python |
import logging
import asyncio
from steam.ext.csgo import Client
from steam.ext.csgo.enums import Language
from steam.ext.csgo.backpack import BaseInspectedItem
from steam.protobufs import GCMsgProto, EMsg, MsgProto
from steam.protobufs.client_server import CMsgClientLicenseListLicense
from steam_tradeoffer_manager.base import SteamBot, SteamBotPool
_log = logging.getLogger(__name__)
# https://steamdb.info/app/730/subs/
_CSGO_PACKAGE_IDS = {
17039,
88535,
54029,
161243,
261665,
14,
211096,
133828,
4,
49,
16236,
16237,
17878,
18702,
18703,
18939,
27267,
29197,
29198,
36071,
39221,
39297,
51835,
51836,
53711,
59228,
62690,
88534,
88541,
88623,
88624,
61,
392171,
61986,
329385,
303386,
63290,
15740,
298963,
298962,
298961,
272766,
199420,
154735,
277644,
273865,
266388,
229740,
226979,
16222,
16223,
16018,
16019,
54030,
63289,
197847,
4116,
11470,
11758,
15990,
17905,
27618,
27762,
35043,
54627,
60765,
62486,
62606,
62688,
113904,
124041,
125313,
}
_CSGO_ID = 730
class InspectBot(SteamBot[int, "InspectPool"], Client):
_licenses: dict[int, CMsgClientLicenseListLicense]
async def on_ready(self) -> None:
await super().on_ready()
await asyncio.sleep(0.1) # ensure licenses event was emitted
for package_id in _CSGO_PACKAGE_IDS:
if package_id in self.licenses:
break
else:
# TODO: errors requesting free license
_log.info(f"Request free CSGO license for {self}")
await self.request_free_license([_CSGO_ID]) # request CSGO license
self.pool.queue.put_nowait(self)
@property
def licenses(self) -> dict[int, CMsgClientLicenseListLicense]:
return getattr(self, "_licenses", {})
async def on_licenses(self, licenses: list[CMsgClientLicenseListLicense]):
self._licenses = {}
for steam_license in licenses:
self.licenses[steam_license.package_id] = steam_license
def timeout(self) -> asyncio.Task:
async def _timeout():
await asyncio.sleep(1)
self.pool.queue.put_nowait(self)
return asyncio.create_task(_timeout())
def request_free_license(self, app_ids: list[int]): # pragma: no cover
return self.ws.send_proto_and_wait(MsgProto(EMsg.ClientRequestFreeLicense, appids=app_ids))
async def inspect_item(self, s: int, a: int, d: int, m: int, timeout: int) -> BaseInspectedItem: # pragma: no cover
await self.ws.send_gc_message(
GCMsgProto(
Language.Client2GCEconPreviewDataBlockRequest,
param_s=s,
param_a=a,
param_d=d,
param_m=m,
)
)
return await self.wait_for("inspect_item_info", timeout=timeout, check=lambda item: item.id == a)
class InspectPool(SteamBotPool[int, InspectBot]):
INSPECT_TIMEOUT: int
def __init__(self) -> None:
super().__init__()
self.queue: asyncio.Queue[InspectBot] = asyncio.Queue()
async def startup(self) -> None:
await super().startup()
# waiting for first bot is ready and then return
bot = await self.queue.get()
self.queue.put_nowait(bot)
async def inspect_item(self, s: int, a: int, d: int, m: int) -> BaseInspectedItem:
bot = await self.queue.get()
try:
item = await bot.inspect_item(s, a, d, m, self.INSPECT_TIMEOUT)
finally:
bot.timeout()
return item
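# Usage sketch (illustrative): the pool and its INSPECT_TIMEOUT are configured
# elsewhere in the application, and s/a/d/m are the parameters of a CS:GO
# inspect link.
#
#   await pool.startup()
#   item = await pool.inspect_item(s=param_s, a=param_a, d=param_d, m=param_m)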
| 22.704819 | 120 | 0.613425 | [
"MIT"
] | somespecialone/clever-inspect | app/services/pool/pool.py | 3,769 | Python |
import numpy as np
class Reward:
pass
class StaticReward(Reward):
def __init__(self, value):
self.value = value
    def get(self):
        return self.value
class NormalReward(Reward):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def get(self):
return np.random.normal(self.mean, self.std)
class Bandit:
def __init__(self, arms):
self.no_of_arms = arms
self.arms = [np.random.normal(0, 1) for _ in range(arms)]
def step(self, arm):
return np.random.normal(self.arms[arm], 1)
class MDP:
"""
Represents a Markov Decision Process.
"""
def __init__(self, S, A, R, p):
"""
Parameters
----------
S : int
Number of states
A : matrix
A[s][a] is True iff a is permitted in s
R : list
A list of reward generators
p : matrix
p[s][a][s'] = p(s'|s,a)
"""
self.S = list(range(S))
self.A, self.R, self.p = A, R, p
self.no_of_states = S
self.no_of_actions = len(A[0])
def step(self, s, a):
"""Given a state and an action, returns a new state and a reward.
Parameters
----------
s : int
Current state
a : int
Action to take
"""
s_prime = np.random.choice(self.no_of_states, p = self.p[s][a])
r = self.R[s_prime].get()
return s_prime, r
def epsilon_greedy(no_of_arms, epsilon, Q, N):
if np.random.random() > epsilon:
# greedy
action = np.argmax(Q)
else:
# random
action = np.random.choice(no_of_arms)
return action
def main():
no_of_arms = 10
no_of_steps = 1000
epsilon = 0.1
no_of_runs = 2000
#bandit = Bandit(no_of_arms)
arms = np.random.normal(0, 1, no_of_arms)
S = 1
A = [[True] * no_of_arms]
R = [NormalReward(m, 1) for m in arms]
p = [[[1] for _ in range(no_of_arms)]]
bandit = MDP(S, A, R, p)
#optimal_action = np.argmax(bandit.arms)
optimal_action = np.argmax(arms)
np.random.seed(1)
    # one independent Q/N table per run (avoid aliasing the same inner list)
    Q = [[0] * no_of_arms for _ in range(no_of_runs)]
    N = [[0] * no_of_arms for _ in range(no_of_runs)]
mean_rewards = [0] * no_of_steps
for j in range(no_of_steps):
for i in range(no_of_runs):
action = epsilon_greedy(no_of_arms, epsilon, Q[i], N[i])
#reward = bandit.step(action)
_, reward = bandit.step(0, action)
mean_rewards[j] += reward
N[i][action] += 1
Q[i][action] += (1 / N[i][action]) * (reward - Q[i][action])
mean_rewards[j] /= no_of_runs
if __name__ == '__main__':
main() | 19 | 67 | 0.634624 | [
"MIT"
] | ronaldosvieira/rl | main.py | 2,299 | Python |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_2 import models
class Username(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'username': 'str'
}
attribute_map = {
'username': 'username'
}
required_args = {
}
def __init__(
self,
username=None, # type: str
):
"""
Keyword args:
username (str): The username of the user.
"""
if username is not None:
self.username = username
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Username`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Username, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Username):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.633929 | 105 | 0.53958 | [
"BSD-2-Clause"
] | Flav-STOR-WL/py-pure-client | pypureclient/flasharray/FA_2_2/models/username.py | 3,095 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=39
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.Y.on(input_qubit[2])) # number=18
c.append(cirq.Z.on(input_qubit[3])) # number=28
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=21
c.append(cirq.H.on(input_qubit[3])) # number=22
c.append(cirq.X.on(input_qubit[3])) # number=13
c.append(cirq.H.on(input_qubit[3])) # number=23
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=24
c.append(cirq.H.on(input_qubit[3])) # number=25
c.append(cirq.H.on(input_qubit[0])) # number=33
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=34
c.append(cirq.H.on(input_qubit[0])) # number=35
c.append(cirq.H.on(input_qubit[1])) # number=19
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=16
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.Y.on(input_qubit[1])) # number=26
c.append(cirq.Y.on(input_qubit[1])) # number=27
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=29
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=30
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=36
c.append(cirq.X.on(input_qubit[0])) # number=37
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=38
c.append(cirq.X.on(input_qubit[0])) # number=32
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2615.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | 36.127907 | 77 | 0.679112 | [
"BSD-3-Clause"
] | UCLA-SEAL/QDiff | benchmark/startCirq2615.py | 3,107 | Python |
import simulations.simulation as simulation
import simulations.simulation_runner as simrunner
import cPickle
import os
import random
import re
import string
import subprocess
import sys
from simulations.utils.optionparser import OptionParser
from nose.tools import assert_equal
from nose.tools import assert_raises
def filename_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
class Sim(simulation.Simulation):
def _run(self):
return "runs"
class Sim2(simulation.Simulation):
def _run(self):
print >> self.out, "runs"
return "runs"
class Batch(simrunner.SimulationRunner):
def _add_listeners(self):
self.on('oparser set up', self._set_options)
self.on('options parsed', self._check_options)
self.on('options parsed', self._set_data)
self.on('done', self._when_done)
@staticmethod
def _set_options(self):
self.oparser.add_option("-t", "--test", action="store_true", dest="test", default=False, help="Testing")
@staticmethod
def _check_options(self):
if not self.options.test:
self.oparser.error("Test flag not passed")
@staticmethod
def _set_data(self):
self.data['test'] = self.options.test
@staticmethod
def _when_done(self):
return "test"
class TestSimulation:
def setUp(self):
self.sim = Sim(1, 2, None)
def tearDown(self):
self.sim = None
def test_simulation_init(self):
assert self.sim is not None, "Sim is not set up"
assert_equal(self.sim.data, 1)
assert_equal(self.sim.num, 2)
assert self.sim.outfile is None, "_outfile is not None"
assert self.sim.out is None
assert_equal(self.sim.out_opened, False)
def test_simulation_set_outfile(self):
self.sim.set_output_file("/tmp/test")
assert_equal(self.sim.outfile, "/tmp/test")
assert self.sim.out is None, "Sim.out is set up"
self.sim.is_running = True
self.sim.set_output_file("/tmp/test")
assert self.sim.out is not None, "Sim.out is not set up"
simulation._close_out_fd(self.sim)
assert self.sim.out is None, "Sim.out was not closed"
assert_equal(self.sim.out_opened, False)
simulation._open_out_fd(self.sim)
assert self.sim.out is not None, "Sim.out was not opened"
assert_equal(self.sim.out_opened, True)
self.sim.set_output_file("/tmp/test2")
simulation._open_out_fd(self.sim)
assert self.sim.out is not None, "Sim.out was not opened"
assert_equal(self.sim.out_opened, True)
def test_simulation_run(self):
assert_equal(self.sim.out_opened, False)
self.sim.set_output_file(False)
result = self.sim.run()
assert_equal(self.sim.result, "runs")
assert_equal(result, "runs")
assert_equal(self.sim.out_opened, False)
assert simulation.Simulation._run(self.sim) is None
def test_delegation_method(self):
self.sim.set_output_file(None)
assert_equal(simrunner.run_simulation([Sim, 1, 2, None]), "runs")
class TestSimulationBatch:
def setUp(self):
self.dir = "/tmp/" + filename_generator(8)
self.batch = Batch(Sim2)
def tearDown(self):
self.batch = None
if os.path.isdir(self.dir):
files = os.listdir(self.dir)
for f in files:
if f == "." or f == "..": continue
if f[-8:] == ".testout":
os.remove(self.dir + os.sep + f)
os.rmdir(self.dir)
def test_batch_init(self):
assert self.batch is not None, "Batch is not set up"
assert isinstance(self.batch.oparser, OptionParser), "Option parser is not initialized"
assert self.batch.options is None, "Options is initialized"
assert self.batch.args is None, "Args is initialized"
assert_equal(self.batch.data, {})
assert_equal(self.batch._task_dup_num, False)
assert_equal(len(self.batch.identifier), 6)
assert re.match('[{0}{1}]{{6}}'.format(string.ascii_uppercase, string.digits), self.batch.identifier)
def test_handler_options(self):
sim2 = Batch(Sim2, option_error_handler=2, option_exit_handler=3)
assert_equal(sim2.oparser._errorhandler, 2)
assert_equal(sim2.oparser._exithandler, 3)
def test_batch_option_setup(self):
assert self.batch.oparser.has_option("-D"), "No -D option"
assert self.batch.oparser.has_option("--nofiledump"), "No --nofiledump option"
assert self.batch.oparser.has_option("-F"), "No -F option"
assert self.batch.oparser.has_option("--filename"), "No --filename option"
assert self.batch.oparser.has_option("-N"), "No -N option"
assert self.batch.oparser.has_option("--duplications"), "No --duplications option"
assert self.batch.oparser.has_option("-O"), "No -O option"
assert self.batch.oparser.has_option("--output"), "No --output option"
assert self.batch.oparser.has_option("-P"), "No -P option"
assert self.batch.oparser.has_option("--poolsize"), "No --poolsize option"
assert self.batch.oparser.has_option("-Q"), "No -Q option"
assert self.batch.oparser.has_option("--quiet"), "No --quiet option"
assert self.batch.oparser.has_option("-S"), "No -S option"
assert self.batch.oparser.has_option("--statsfile"), "No --statsfile option"
assert self.batch.oparser.has_option("-t"), "No -t option"
assert self.batch.oparser.has_option("--test"), "No --test option"
def test_batch_go(self):
args = ["-F", "iter_{0}.testout", "-N", "4", "-O", self.dir, "-S", "results.testout", "--test"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 4)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "iter_{0}.testout")
assert_equal(self.batch.options.file_dump, True)
assert_equal(self.batch.options.stats_file, "results.testout")
## pp stuff
#assert_equal(self.batch.options.pool_size, 'autodetect')
assert self.batch.options.pool_size is None, "Pool size is not None"
assert_equal(self.batch.options.quiet, False)
assert_equal(self.batch.data['test'], True)
for i in range(4):
assert os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
for i in range(4):
with open(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1), "r") as dup_file:
assert_equal(dup_file.read(), "runs\n")
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(4):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_batch_go2(self):
args = ["-N", "6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "--test", "-D"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 6)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "duplication_{0}")
assert_equal(self.batch.options.file_dump, False)
assert_equal(self.batch.options.stats_file, "results.testout")
assert_equal(self.batch.options.pool_size, 2)
assert_equal(self.batch.options.quiet, True)
assert_equal(self.batch.data['test'], True)
for i in range(6):
            assert not os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} should not exist".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(6):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_batch_go3(self):
args = ["-N", "6", "-P", "1", "-O", self.dir, "-S", "results.testout", "--test", "-D"]
assert self.batch.go(option_args=args) is None
assert_equal(self.batch.options.test, True)
assert_equal(self.batch.options.dup, 6)
assert_equal(self.batch.options.output_dir, self.dir)
assert_equal(self.batch.options.output_file, "duplication_{0}")
assert_equal(self.batch.options.file_dump, False)
assert_equal(self.batch.options.stats_file, "results.testout")
assert_equal(self.batch.options.pool_size, 1)
assert_equal(self.batch.options.quiet, False)
assert_equal(self.batch.data['test'], True)
for i in range(6):
            assert not os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} should not exist".format(i + 1)
assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
with open(self.dir + os.sep + 'results.testout', "r") as results_file:
should_be = ''
should_be += cPickle.dumps(self.batch.options) + "\n"
should_be += "\n"
for _ in range(6):
should_be += cPickle.dumps("runs") + "\n"
should_be += "\n"
assert_equal(results_file.read(), should_be)
def test_option_failure(self):
args = ["-N", "-6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "-D", "--test"]
assert_raises(SystemExit, self.batch.go, option_args=args)
assert_raises(SystemExit, self.batch.go, option_values=None)
def test_option_failure2(self):
args = ["-N", "6", "-P", "2", "-O", self.dir, "-S", "results.testout", "-Q", "-D"]
assert_raises(SystemExit, self.batch.go, option_args=args)
def test_option_failure3(self):
args = ["-N", "6", "-P", "-1", "-O", self.dir, "-S", "results.testout", "-Q", "-D", "--test"]
assert_raises(SystemExit, self.batch.go, option_args=args)
## pp stuff
#class TestClustering:
#
# def setUp(self):
# self.secret = filename_generator(6)
# self.server = subprocess.Popen(["ppserver.py", "-s", self.secret])
# self.batch = Batch(Sim2)
# self.dir = "/tmp/" + filename_generator(8)
#
# def tearDown(self):
# self.batch = None
# self.server.terminate()
# if os.path.isdir(self.dir):
# files = os.listdir(self.dir)
# for f in files:
# if f == "." or f == "..": continue
# if f[-8:] == ".testout":
# os.remove(self.dir + os.sep + f)
# os.rmdir(self.dir)
#
# def test_batch_cluster_go(self):
# args = ["-F", "iter_{0}.testout", "-N", "4", "-P", "2", "-O", self.dir, "-S", "results.testout", "--test", "--cluster=127.0.0.1", "--clustersecret="+self.secret]
# assert self.batch.go(option_args=args) is None
# assert_equal(self.batch.options.test, True)
# assert_equal(self.batch.options.dup, 4)
# assert_equal(self.batch.options.output_dir, self.dir)
# assert_equal(self.batch.options.output_file, "iter_{0}.testout")
# assert_equal(self.batch.options.file_dump, True)
# assert_equal(self.batch.options.stats_file, "results.testout")
# assert_equal(self.batch.options.pool_size, 2)
# assert_equal(self.batch.options.quiet, False)
# assert_equal(self.batch.options.cluster_string, '127.0.0.1')
# assert_equal(self.batch.options.cluster_secret, self.secret)
#
# assert_equal(self.batch.data['test'], True)
#
# for i in range(4):
# assert os.path.isfile(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1)), "Dup file {0} is missing".format(i + 1)
# assert os.path.isfile(self.dir + os.sep + 'results.testout'), "Results file is missing"
#
# for i in range(4):
# with open(self.dir + os.sep + 'iter_{0}.testout'.format(i + 1), "r") as dup_file:
# assert_equal(dup_file.read(), "runs\n")
#
# with open(self.dir + os.sep + 'results.testout', "r") as results_file:
# should_be = ''
# should_be += cPickle.dumps(self.batch.options) + "\n"
# should_be += "\n"
# for _ in range(4):
# should_be += cPickle.dumps("runs") + "\n"
# should_be += "\n"
# assert_equal(results_file.read(), should_be)
#
| 41.625397 | 171 | 0.616306 | [
"MIT"
] | gsmcwhirter/simulations | test/simulation_tests.py | 13,112 | Python |
"""Support for Blockchain.com sensors."""
from datetime import timedelta
import logging
from pyblockchain import get_balance, validate_address
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by blockchain.com"
CONF_ADDRESSES = "addresses"
DEFAULT_NAME = "Bitcoin Balance"
ICON = "mdi:currency-btc"
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ADDRESSES): [cv.string],
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
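# Example configuration.yaml entry matching the schema above (the address is a
# placeholder):
#
#   sensor:
#     - platform: blockchain
#       name: Bitcoin Balance
#       addresses:
#         - "YOUR_BITCOIN_ADDRESS"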
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Blockchain.com sensors."""
addresses = config[CONF_ADDRESSES]
name = config[CONF_NAME]
for address in addresses:
if not validate_address(address):
_LOGGER.error("Bitcoin address is not valid: %s", address)
return False
add_entities([BlockchainSensor(name, addresses)], True)
class BlockchainSensor(Entity):
"""Representation of a Blockchain.com sensor."""
def __init__(self, name, addresses):
"""Initialize the sensor."""
self._name = name
self.addresses = addresses
self._state = None
self._unit_of_measurement = "BTC"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
"""Get the latest state of the sensor."""
self._state = get_balance(self.addresses)
| 26.453488 | 77 | 0.688791 | [
"Apache-2.0"
] | CantankerousBullMoose/core | homeassistant/components/blockchain/sensor.py | 2,275 | Python |
from typing import Any
from copy import deepcopy
class Model:
def __init__(self, name: str, model, freq: str):
self.name = name
self.model = model
self.freq = freq
self.train = None
self.test = None
self.prediction = None
self.pred_col = "prediction"
self.y_col = "y"
self.date_col = "ds"
def fit(self, train_dataset):
"Performs model training with standard settings"
self.train = deepcopy(train_dataset)
if "orbit" in self.name:
self.model.fit(self.train)
elif "nprophet" in self.name:
self.model.fit(self.train, validate_each_epoch=True,
valid_p=0.2, freq=self.freq,
plot_live_loss=True, epochs=100)
def predict(self, dataset: Any):
"Performs prediction"
self.test = deepcopy(dataset)
if "orbit" in self.name:
prediction = self.model.predict(self.test)
elif "nprophet" in self.name:
future = self.model.make_future_dataframe(self.train, periods=len(self.test))
prediction = self.model.predict(future).rename(columns={"yhat1": self.pred_col})
prediction = prediction[[self.date_col, self.pred_col]]
self.prediction = prediction
return self.prediction
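# Usage sketch (illustrative): `orbit_estimator` stands in for an unfitted orbit
# model object (e.g. a DLT instance) and `train_df`/`test_df` for pandas frames
# with the `ds`/`y` columns assumed by this class.
#
#   wrapper = Model(name="orbit-dlt", model=orbit_estimator, freq="D")
#   wrapper.fit(train_df)
#   forecast = wrapper.predict(test_df)  # DataFrame with `ds` and `prediction`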
| 27.612245 | 92 | 0.597931 | [
"MIT"
] | MiguelMque/eafit-numerical-analysis-project | interpolML/interpolML/model/model.py | 1,353 | Python |
#!/usr/bin/python
# https://practice.geeksforgeeks.org/problems/knapsack-with-duplicate-items/0
def sol(n, w, wt, v):
"""
We do not need to create a 2d array here because all numbers are available
always
Try all items for weight ranging from 1 to w and check if weight
can be picked. Take the max of the result
"""
dp = [0 for i in range(w+1)]
for i in range(n):
for j in range(w+1):
if wt[i] <= j:
dp[j] = max(dp[j], v[i]+dp[j-wt[i]])
return dp[w] | 30.941176 | 78 | 0.587452 | [
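# Small check of the unbounded knapsack above: with capacity 3 and items
# (weight, value) = (2, 1) and (1, 1), taking the weight-1 item three times
# gives the optimal value 3.
if __name__ == "__main__":
    assert sol(2, 3, [2, 1], [1, 1]) == 3
    print(sol(2, 3, [2, 1], [1, 1]))  # 3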
"Apache-2.0"
] | vikas-t/DS-Algo | full-problems/knapsackWithDuplicates.py | 526 | Python |
# -*- coding: utf-8 -*-
from .amount import Amount
from .instance import BlockchainInstance
from graphenecommon.account import (
Account as GrapheneAccount,
AccountUpdate as GrapheneAccountUpdate,
)
from bitsharesbase import operations
@BlockchainInstance.inject
class Account(GrapheneAccount):
"""
This class allows to easily access Account data.
:param str account_name: Name of the account
:param bitshares.bitshares.BitShares blockchain_instance: BitShares
instance
:param bool full: Obtain all account data including orders, positions, etc.
:param bool lazy: Use lazy loading
:param bool full: Obtain all account data including orders, positions,
etc.
:returns: Account data
:rtype: dictionary
:raises bitshares.exceptions.AccountDoesNotExistsException: if account
does not exist
Instances of this class are dictionaries that come with additional
methods (see below) that allow dealing with an account and it's
corresponding functions.
.. code-block:: python
from bitshares.account import Account
account = Account("init0")
print(account)
.. note:: This class comes with its own caching function to reduce the
load on the API server. Instances of this class can be
refreshed with ``Account.refresh()``.
"""
def define_classes(self):
self.type_id = 2
self.amount_class = Amount
self.operations = operations
@property
def call_positions(self):
"""Alias for :func:bitshares.account.Account.callpositions."""
return self.callpositions()
@property
def callpositions(self):
"""List call positions (collateralized positions :doc:`mpa`)"""
self.ensure_full()
from .dex import Dex
dex = Dex(blockchain_instance=self.blockchain)
return dex.list_debt_positions(self)
@property
def openorders(self):
"""Returns open Orders."""
from .price import Order
self.ensure_full()
return [
Order(o, blockchain_instance=self.blockchain) for o in self["limit_orders"]
]
@BlockchainInstance.inject
class AccountUpdate(GrapheneAccountUpdate):
"""
This purpose of this class is to keep track of account updates as they are pushed
through by :class:`bitshares.notify.Notify`.
Instances of this class are dictionaries and take the following
form:
.. code-block: js
{'id': '2.6.29',
'lifetime_fees_paid': '44261516129',
'most_recent_op': '2.9.0',
'owner': '1.2.29',
'pending_fees': 0,
'pending_vested_fees': 16310,
'total_core_in_orders': '6788845277634',
'total_ops': 0}
"""
def define_classes(self):
self.account_class = Account
| 29.729167 | 87 | 0.662929 | [
"MIT"
] | bangzi1001/python-nbs | bitshares/account.py | 2,854 | Python |
"""BGEN reader implementation (using bgen_reader)"""
import logging
import tempfile
import time
from pathlib import Path
from typing import (
Any,
Dict,
Hashable,
List,
Mapping,
MutableMapping,
Optional,
Tuple,
Union,
)
import dask
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import xarray as xr
import zarr
from cbgen import bgen_file, bgen_metafile
from rechunker import api as rechunker_api
from xarray import Dataset
from sgkit import create_genotype_dosage_dataset
from sgkit.io.utils import dataframe_to_dict, encode_contigs
from sgkit.typing import ArrayLike, DType, NDArray, PathType
logger = logging.getLogger(__name__)
GT_DATA_VARS = [
"call_genotype_probability",
"call_genotype_probability_mask",
"call_dosage",
"call_dosage_mask",
]
METAFILE_DTYPE = dict(
[
("id", "S"),
("rsid", "S"),
("chrom", "S"),
("pos", "int32"),
("a1", "S"),
("a2", "S"),
("offset", "int64"),
]
)
class BgenReader:
name = "bgen_reader"
def __init__(
self,
path: PathType,
metafile_path: Optional[PathType] = None,
dtype: DType = "float32",
) -> None:
self.path = Path(path)
self.metafile_path = (
Path(metafile_path) if metafile_path else self.path.with_suffix(".metafile")
)
with bgen_file(self.path) as bgen:
self.n_variants = bgen.nvariants
self.n_samples = bgen.nsamples
if not self.metafile_path.exists():
start = time.time()
logger.info(
f"Generating BGEN metafile for '{self.path}' (this may take a while)"
)
bgen.create_metafile(self.metafile_path, verbose=False)
stop = time.time()
logger.info(
f"BGEN metafile generation complete ({stop - start:.0f} seconds)"
)
with bgen_metafile(self.metafile_path) as mf:
assert self.n_variants == mf.nvariants
self.npartitions = mf.npartitions
self.partition_size = mf.partition_size
self.shape = (self.n_variants, self.n_samples, 3)
self.dtype = np.dtype(dtype)
self.precision = 64 if self.dtype.itemsize >= 8 else 32
self.ndim = 3
def __getitem__(self, idx: Any) -> NDArray:
if not isinstance(idx, tuple):
raise IndexError(f"Indexer must be tuple (received {type(idx)})")
if len(idx) != self.ndim:
raise IndexError(
f"Indexer must have {self.ndim} items (received {len(idx)} slices)"
)
if not all(isinstance(i, slice) or isinstance(i, int) for i in idx):
raise IndexError(
f"Indexer must contain only slices or ints (received types {[type(i) for i in idx]})"
)
# Determine which dims should have unit size in result
squeeze_dims = tuple(i for i in range(len(idx)) if isinstance(idx[i], int))
# Convert all indexers to slices
idx = tuple(slice(i, i + 1) if isinstance(i, int) else i for i in idx)
if idx[0].start == idx[0].stop:
return np.empty((0,) * self.ndim, dtype=self.dtype)
# Determine start and end partitions that correspond to the
# given variant dimension indexer
start_partition = idx[0].start // self.partition_size
start_partition_offset = idx[0].start % self.partition_size
end_partition = (idx[0].stop - 1) // self.partition_size
end_partition_offset = (idx[0].stop - 1) % self.partition_size
# Create a list of all offsets into the underlying file at which
# data for each variant begins
all_vaddr = []
with bgen_metafile(self.metafile_path) as mf:
for i in range(start_partition, end_partition + 1):
partition = mf.read_partition(i)
start_offset = start_partition_offset if i == start_partition else 0
end_offset = (
end_partition_offset + 1
if i == end_partition
else self.partition_size
)
vaddr = partition.variants.offset
all_vaddr.extend(vaddr[start_offset:end_offset].tolist())
# Read the probabilities for each variant, apply indexer for
# samples dimension to give probabilities for all genotypes,
# and then apply final genotype dimension indexer
with bgen_file(self.path) as bgen:
res = None
for i, vaddr in enumerate(all_vaddr):
probs = bgen.read_probability(vaddr, precision=self.precision)[idx[1]]
assert len(probs.shape) == 2 and probs.shape[1] == 3
if res is None:
res = np.zeros((len(all_vaddr), len(probs), 3), dtype=self.dtype)
res[i] = probs
res = res[..., idx[2]] # type: ignore[index]
return np.squeeze(res, axis=squeeze_dims)
def _split_alleles(allele_ids: bytes) -> List[bytes]:
alleles = allele_ids.split(b",")
if len(alleles) != 2:
raise NotImplementedError(
f"Bgen reads only supported for biallelic variants (found non-biallelic variant '{str(allele_ids)}')"
)
return alleles
def _read_metafile_partition(path: Path, partition: int) -> pd.DataFrame:
with bgen_metafile(path) as mf:
part = mf.read_partition(partition)
v = part.variants
allele_ids = np.array([_split_alleles(aid) for aid in v.allele_ids])
data = {
"id": v.id,
"rsid": v.rsid,
"chrom": v.chromosome,
"pos": v.position,
"a1": allele_ids[:, 0],
"a2": allele_ids[:, 1],
"offset": v.offset,
}
return pd.DataFrame(data).astype(METAFILE_DTYPE)
def read_metafile(path: PathType) -> dd.DataFrame:
"""Read cbgen metafile containing partitioned variant info"""
with bgen_metafile(path) as mf:
divisions = [mf.partition_size * i for i in range(mf.npartitions)] + [
mf.nvariants - 1
]
dfs = [
dask.delayed(_read_metafile_partition)(path, i)
for i in range(mf.npartitions)
]
meta = dd.utils.make_meta(METAFILE_DTYPE)
return dd.from_delayed(dfs, meta=meta, divisions=divisions)
def read_samples(path: PathType) -> pd.DataFrame:
"""Read BGEN .sample file"""
df = pd.read_csv(path, sep=" ", skiprows=[1], usecols=[0])
df.columns = ["sample_id"]
return df
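# Hedged sketch of read_samples: it assumes the Oxford ".sample" layout with a
# header row and a second row of column type codes (hence skiprows=[1]); the
# file path and sample IDs below are purely illustrative.
def _example_read_samples(path: str = "example.sample") -> pd.DataFrame:
    with open(path, "w") as f:
        f.write("ID_1 ID_2 missing\n0 0 0\nsample_001 sample_001 0\nsample_002 sample_002 0\n")
    df = read_samples(path)
    # df["sample_id"] -> ["sample_001", "sample_002"]
    return df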
def read_bgen(
path: PathType,
metafile_path: Optional[PathType] = None,
sample_path: Optional[PathType] = None,
chunks: Union[str, int, Tuple[int, int, int]] = "auto",
lock: bool = False,
persist: bool = True,
contig_dtype: DType = "str",
gp_dtype: DType = "float32",
) -> Dataset:
"""Read BGEN dataset.
Loads a single BGEN dataset as dask arrays within a Dataset
from a ``.bgen`` file.
Parameters
----------
path
Path to BGEN file.
metafile_path
Path to companion index file used to determine BGEN byte offsets.
Defaults to ``path`` + ".metafile" if not provided.
This file is necessary for reading BGEN genotype probabilities and it will be
generated the first time the file is read if it does not already exist.
If it needs to be created, it can make the first call to this function
much slower than subsequent calls.
sample_path
Path to ``.sample`` file, by default None. This is used to fetch sample identifiers
and when provided it is preferred over sample identifiers embedded in the ``.bgen`` file.
chunks
Chunk size for genotype probability data (3 dimensions),
by default "auto".
lock
Whether or not to synchronize concurrent reads of
file blocks, by default False. This is passed through to
[dask.array.from_array](https://docs.dask.org/en/latest/array-api.html#dask.array.from_array).
persist
Whether or not to persist variant information in memory, by default True.
This is an important performance consideration as the metadata file for this data will
be read multiple times when False.
contig_dtype
Data type for contig names, by default "str".
This may also be an integer type (e.g. "int"), but will fail if any of the contig names
cannot be converted to integers.
gp_dtype
Data type for genotype probabilities, by default "float32".
Warnings
--------
Only bi-allelic, diploid BGEN files are currently supported.
Returns
-------
A dataset containing the following variables:
- :data:`sgkit.variables.variant_id_spec` (variants)
- :data:`sgkit.variables.variant_contig_spec` (variants)
- :data:`sgkit.variables.variant_position_spec` (variants)
- :data:`sgkit.variables.variant_allele_spec` (variants)
- :data:`sgkit.variables.sample_id_spec` (samples)
- :data:`sgkit.variables.call_dosage_spec` (variants, samples)
- :data:`sgkit.variables.call_dosage_mask_spec` (variants, samples)
- :data:`sgkit.variables.call_genotype_probability_spec` (variants, samples, genotypes)
- :data:`sgkit.variables.call_genotype_probability_mask_spec` (variants, samples, genotypes)
"""
if isinstance(chunks, tuple) and len(chunks) != 3:
raise ValueError(f"`chunks` must be tuple with 3 items, not {chunks}")
if not np.issubdtype(gp_dtype, np.floating):
raise ValueError(
f"`gp_dtype` must be a floating point data type, not {gp_dtype}"
)
if not np.issubdtype(contig_dtype, np.integer) and np.dtype(
contig_dtype
).kind not in {"U", "S"}:
raise ValueError(
f"`contig_dtype` must be of string or int type, not {contig_dtype}"
)
path = Path(path)
sample_path = Path(sample_path) if sample_path else path.with_suffix(".sample")
if sample_path.exists():
sample_id = read_samples(sample_path).sample_id.values.astype("U")
else:
sample_id = _default_sample_ids(path)
bgen_reader = BgenReader(path, metafile_path=metafile_path, dtype=gp_dtype)
df = read_metafile(bgen_reader.metafile_path)
if persist:
df = df.persist()
arrs = dataframe_to_dict(df, METAFILE_DTYPE)
variant_id = arrs["id"]
variant_contig: ArrayLike = arrs["chrom"].astype(contig_dtype)
variant_contig, variant_contig_names = encode_contigs(variant_contig)
variant_contig_names = list(variant_contig_names)
variant_position = arrs["pos"]
variant_allele = da.hstack((arrs["a1"][:, np.newaxis], arrs["a2"][:, np.newaxis]))
call_genotype_probability = da.from_array(
bgen_reader,
chunks=chunks,
lock=lock,
fancy=False,
asarray=False,
name=f"{bgen_reader.name}:read_bgen:{path}",
)
call_dosage = _to_dosage(call_genotype_probability)
ds: Dataset = create_genotype_dosage_dataset(
variant_contig_names=variant_contig_names,
variant_contig=variant_contig,
variant_position=variant_position,
variant_allele=variant_allele,
sample_id=sample_id,
call_dosage=call_dosage,
call_genotype_probability=call_genotype_probability,
variant_id=variant_id,
)
return ds
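# Hedged usage sketch for read_bgen; the path is hypothetical and the companion
# ".sample"/".metafile" files are resolved automatically as documented above.
def _example_read_bgen() -> Dataset:
    ds = read_bgen(
        "data/example.bgen",       # hypothetical input
        chunks=(100, -1, -1),      # chunk variants only
        gp_dtype="float16",        # smaller in-memory probabilities
    )
    # ds.call_genotype_probability: (variants, samples, genotypes=3)
    # ds.call_dosage: (variants, samples)
    return ds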
def _default_sample_ids(path: PathType) -> ArrayLike:
"""Fetch or generate sample ids"""
with bgen_file(path) as bgen:
if bgen.contain_samples:
return bgen.read_samples()
else:
return np.char.add(b"sample_", np.arange(bgen.nsamples).astype("S")) # type: ignore[no-untyped-call]
def _to_dosage(probs: ArrayLike) -> ArrayLike:
"""Calculate the dosage from genotype likelihoods (probabilities)"""
assert (
probs.shape[-1] == 3
), f"Expecting genotype (trailing) dimension of size 3, got array of shape {probs.shape}"
return probs[..., 1] + 2 * probs[..., 2]
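# Worked example of the dosage formula above (dosage = P(het) + 2 * P(hom alt)):
# probabilities [0.1, 0.2, 0.7] give an expected alternate-allele count of
# 0.2 + 2 * 0.7 = 1.6. The probability values are illustrative.
def _example_dosage() -> None:
    probs = np.array([[[0.1, 0.2, 0.7], [1.0, 0.0, 0.0]]])  # (1 variant, 2 samples, 3)
    np.testing.assert_allclose(_to_dosage(probs), [[1.6, 0.0]])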
########################
# Rechunking Functions #
########################
def encode_variables(
ds: Dataset,
chunk_length: int,
chunk_width: int,
compressor: Optional[Any] = zarr.Blosc(cname="zstd", clevel=7, shuffle=2),
probability_dtype: Optional[Any] = "uint8",
) -> Dict[Hashable, Dict[str, Any]]:
encoding = {}
for v in ds:
e = {}
if compressor is not None:
e.update({"compressor": compressor})
if v in GT_DATA_VARS:
e.update({"chunks": (chunk_length, chunk_width) + ds[v].shape[2:]})
if probability_dtype is not None and v == "call_genotype_probability":
dtype = np.dtype(probability_dtype)
# Xarray will decode into float32 so any int greater than
# 16 bits will cause overflow/underflow
# See https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
# *bits precision column for single precision floats
if dtype not in [np.uint8, np.uint16]: # type: ignore[comparison-overlap]
raise ValueError(
"Probability integer dtype invalid, must "
f"be uint8 or uint16 not {probability_dtype}"
)
divisor = np.iinfo(dtype).max - 1
e.update(
{
"dtype": probability_dtype,
"add_offset": -1.0 / divisor,
"scale_factor": 1.0 / divisor,
"_FillValue": 0,
}
)
if e:
encoding[v] = e
return encoding
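# Sketch of the uint8 quantization implied by the encoding above (divisor=254):
# xarray decodes a stored byte k as k * scale_factor + add_offset = (k - 1) / 254,
# with the fill value 0 reserved for missing probabilities. Values are illustrative.
def _example_probability_roundtrip(p: float = 0.42) -> float:
    divisor = np.iinfo(np.uint8).max - 1                  # 254
    add_offset, scale_factor = -1.0 / divisor, 1.0 / divisor
    stored = int(round((p - add_offset) / scale_factor))  # encode: round(p * 254 + 1)
    return stored * scale_factor + add_offset             # decode: close to p (step 1/254)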
def pack_variables(ds: Dataset) -> Dataset:
# Remove dosage as it is unnecessary and should be redefined
# based on encoded probabilities later (w/ reduced precision)
ds = ds.drop_vars(["call_dosage", "call_dosage_mask"], errors="ignore")
# Remove homozygous reference GP and redefine mask
gp = ds["call_genotype_probability"][..., 1:]
gp_mask = ds["call_genotype_probability_mask"].any(dim="genotypes")
ds = ds.drop_vars(["call_genotype_probability", "call_genotype_probability_mask"])
ds = ds.assign(call_genotype_probability=gp, call_genotype_probability_mask=gp_mask)
return ds
def unpack_variables(ds: Dataset, dtype: DType = "float32") -> Dataset:
# Restore homozygous reference GP
gp = ds["call_genotype_probability"].astype(dtype)
if gp.sizes["genotypes"] != 2:
raise ValueError(
"Expecting variable 'call_genotype_probability' to have genotypes "
f"dimension of size 2 (received sizes = {dict(gp.sizes)})"
)
ds = ds.drop_vars("call_genotype_probability")
ds["call_genotype_probability"] = xr.concat(
[1 - gp.sum(dim="genotypes", skipna=False), gp], dim="genotypes"
)
# Restore dosage
ds["call_dosage"] = gp[..., 0] + 2 * gp[..., 1]
ds["call_dosage_mask"] = ds["call_genotype_probability_mask"]
ds["call_genotype_probability_mask"] = ds[
"call_genotype_probability_mask"
].broadcast_like(ds["call_genotype_probability"])
return ds
def rechunk_bgen(
ds: Dataset,
output: Union[PathType, MutableMapping[str, bytes]],
*,
chunk_length: int = 10_000,
chunk_width: int = 1_000,
compressor: Optional[Any] = zarr.Blosc(cname="zstd", clevel=7, shuffle=2),
probability_dtype: Optional[DType] = "uint8",
max_mem: str = "4GB",
pack: bool = True,
tempdir: Optional[PathType] = None,
) -> Dataset:
"""Rechunk BGEN dataset as Zarr.
    This function uses the rechunker package (https://rechunker.readthedocs.io/en/latest/)
    to rechunk certain fields in a provided Dataset for better downstream performance.
Depending on the system memory available (and the `max_mem` setting) this
rechunking may occur without the need of any intermediate data store. Otherwise,
approximately as much disk space is required as was needed to store the original
BGEN data. Experiments show that this Zarr representation is ~20% larger even
with all available optimizations and fairly aggressive compression (i.e. the
default `clevel` 7).
Note that this function is not evaluated lazily. The rechunking algorithm
will run inline so calls to it may be slow. The resulting Dataset is
generated based on the final, serialized Zarr data.
Parameters
----------
ds
Dataset to rechunk, typically the result from `read_bgen`.
output
Zarr store or path to directory in file system.
chunk_length
Length (number of variants) of chunks in which data are stored, by default 10_000.
chunk_width
Width (number of samples) to use when storing chunks in output, by default 1_000.
compressor
        Zarr compressor; no compression is used when set to None.
probability_dtype
Data type used to encode genotype probabilities, must be either uint8 or uint16.
Setting this parameter results in a loss of precision. If None, probabilities
will not be altered when stored.
max_mem
The amount of memory (in bytes) that workers are allowed to use. A string
(e.g. 100MB) can also be used.
pack
Whether or not to optimize variable representations by removing unnecessary
dimensions and elements. This includes storing 2 genotypes instead of 3, omitting
dosage and collapsing the genotype probability mask to 2 dimensions. All of
the above are restored in the resulting Dataset at the expense of extra
computations on read.
tempdir
Temporary directory where intermediate files are stored. The default None means
use the system default temporary directory.
Warnings
--------
    This function is only applicable to diploid, bi-allelic BGEN datasets.
Returns
-------
Dataset
The rechunked dataset.
"""
if isinstance(output, Path):
output = str(output)
chunk_length = min(chunk_length, ds.dims["variants"])
chunk_width = min(chunk_width, ds.dims["samples"])
if pack:
ds = pack_variables(ds)
encoding = encode_variables(
ds,
chunk_length=chunk_length,
chunk_width=chunk_width,
compressor=compressor,
probability_dtype=probability_dtype,
)
target_chunks = {
var: encoding[var]["chunks"] for var in encoding if "chunks" in encoding[var]
}
target_options = {
var: {k: v for k, v in encoding[var].items() if k != "chunks"}
for var in encoding
}
with tempfile.TemporaryDirectory(
prefix="bgen_to_zarr_", suffix=".zarr", dir=tempdir
) as tmpdir:
rechunked = rechunker_api.rechunk(
ds,
max_mem=max_mem,
target_chunks=target_chunks,
target_store=output,
target_options=target_options,
temp_store=tmpdir,
executor="dask",
)
rechunked.execute()
zarr.consolidate_metadata(output)
ds: Dataset = xr.open_zarr(output, concat_characters=False) # type: ignore[no-untyped-call]
if pack:
ds = unpack_variables(ds)
return ds
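# Hedged usage sketch for rechunk_bgen; the paths, chunk sizes and memory budget
# are hypothetical and would normally come from the surrounding pipeline.
def _example_rechunk_bgen() -> Dataset:
    ds = read_bgen("data/example.bgen", chunks=(100, -1, -1))
    return rechunk_bgen(
        ds,
        "data/example.zarr",
        chunk_length=10_000,
        chunk_width=1_000,
        probability_dtype="uint8",
        max_mem="2GB",
    )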
def bgen_to_zarr(
input: PathType,
output: Union[PathType, MutableMapping[str, bytes]],
region: Optional[Mapping[Hashable, Any]] = None,
chunk_length: int = 10_000,
chunk_width: int = 1_000,
temp_chunk_length: int = 100,
compressor: Optional[Any] = zarr.Blosc(cname="zstd", clevel=7, shuffle=2),
probability_dtype: Optional[DType] = "uint8",
max_mem: str = "4GB",
pack: bool = True,
tempdir: Optional[PathType] = None,
) -> Dataset:
"""Convert a BGEN file to a Zarr on-disk store.
This function is a convenience for calling :func:`read_bgen` followed by
:func:`rechunk_bgen`.
Parameters
----------
input
Path to local BGEN dataset.
output
Zarr store or path to directory in file system.
region
Indexers on dataset dimensions used to define a subset of data to convert.
Must be None or a dict with keys matching dimension names and values
equal to integers or slice objects. This is passed directly to `Dataset.isel`
so it has the same semantics.
chunk_length
Length (number of variants) of chunks in which data are stored, by default 10_000.
chunk_width
Width (number of samples) to use when storing chunks in output, by default 1_000.
temp_chunk_length
Length of chunks used in raw BGEN read, by default 100. This defines the vertical
chunking (i.e. in the variants dimension) used when reading the raw data and because
there is no horizontal chunking at this phase (i.e. in the samples dimension), this
value should be much smaller than the target `chunk_length`.
compressor
Zarr compressor, by default Blosc + zstd with compression level 7. No compression
is used when set as None.
probability_dtype
Data type used to encode genotype probabilities, must be either uint8 or uint16.
Setting this parameter results in a loss of precision. If None, probabilities
will not be altered when stored.
max_mem
The amount of memory (in bytes) that workers are allowed to use. A string
(e.g. 100MB) can also be used.
pack
Whether or not to optimize variable representations by removing unnecessary
dimensions and elements. This includes storing 2 genotypes instead of 3, omitting
dosage and collapsing the genotype probability mask to 2 dimensions. All of
the above are restored in the resulting Dataset at the expense of extra
computations on read.
tempdir
Temporary directory where intermediate files are stored. The default None means
use the system default temporary directory.
Warnings
--------
    This function is only applicable to diploid, bi-allelic BGEN datasets.
Returns
-------
Dataset
The rechunked dataset.
"""
ds = read_bgen(input, chunks=(temp_chunk_length, -1, -1))
if region is not None:
ds = ds.isel(indexers=region)
return rechunk_bgen(
ds,
output,
chunk_length=chunk_length,
chunk_width=chunk_width,
compressor=compressor,
probability_dtype=probability_dtype,
max_mem=max_mem,
pack=pack,
tempdir=tempdir,
)
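# Hedged end-to-end sketch: convert only the first 50_000 variants of a
# hypothetical BGEN file to Zarr via the `region` indexer described above.
def _example_bgen_to_zarr() -> Dataset:
    return bgen_to_zarr(
        "data/example.bgen",
        "data/example.zarr",
        region={"variants": slice(0, 50_000)},
        temp_chunk_length=100,
    )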
| 36.959083 | 113 | 0.64299 | ["Apache-2.0"] | pystatgen/sgk | sgkit/io/bgen/bgen_reader.py | 22582 | Python |
import inspect
import os
import pyperclip
import requests
import time
from urllib.parse import quote
# a list of the request error classes
request_errors = [obj for name, obj in inspect.getmembers(requests.exceptions)
if inspect.isclass(obj) and issubclass(obj, Exception)]
# main daemon loop
while True:
# get clipboard value
clipboard = pyperclip.paste()
try:
# percent encode the clipboard value
safe_cb = quote(clipboard,safe='')
# bitly API access token
token = os.environ.get('BITLY_TOKEN')
# URL that will make the API call
bitly_url = 'https://api-ssl.bitly.com/v3/shorten?' + \
'access_token=' + token + '&longUrl=' + safe_cb
# get the json return from the API call
short_url = requests.get(bitly_url).json()
# if everything went as planned
if(short_url['status_txt'] == 'OK'):
pyperclip.copy(short_url['data']['url'])
except Exception as e:
# if something went wrong with the request, i.e. not a link
if(any(issubclass(e.__class__, lv) for lv in request_errors)):
pass
else:
raise(e)
# wait until the clipboard changes
while(pyperclip.paste() == clipboard):
time.sleep(.1)
| 34.972973 | 78 | 0.629057 | ["MIT"] | lawja/AutoSHRTNR | autoshort.py | 1294 | Python |
# source ./venv/bin/activate
# ===============================================================
# =============================COOL==============================
# ===============================================================
import sys
from general import errors
# import os
# basedir = os.path.abspath(os.path.dirname(__file__))
# ===============================================================
def main():
# TAKE THE INPUT
programs = sys.argv[1:]
# CHECK IF AT LEAST ONE FILE IS GIVEN
if len(programs) == 0:
errors.throw_error(errors.CompilerError(text="No file is given to coolc compiler."))
# CHECK IF FILEOUT IS GIVEN
if programs[0] == '-o':
if len(programs) == 1:
errors.throw_error(errors.CompilerError(text="No fileout is given to coolc compiler."))
fileout = programs[1]
if not str(fileout).endswith(".asm"):
errors.throw_error(errors.CompilerError(text="Fileout must end with .asm extension."))
if len(programs) == 2:
errors.throw_error(errors.CompilerError(text="No file is given to coolc compiler."))
programs = programs[2:]
else:
fileout = programs[0].split(".cl")[0] + ".asm"
# Check all programs have the *.cl extension.
for program in programs:
if not str(program).endswith(".cl"):
errors.throw_error(errors.CompilerError(text="Cool program files must end with a .cl extension."))
code = ""
# Read all program source codes.
for program in programs:
try:
with open(program, encoding="utf-8") as file:
code += file.read() + '\n'
except (IOError, FileNotFoundError):
errors.throw_error(errors.CompilerError(text=f'File "{program}" was not found.'))
except Exception:
errors.throw_error(errors.CompilerError(text="An unexpected error occurred!"))
print(f"Compiling file '{fileout}'...")
# ===============================================================
# ==================ANALISIS-LEXICOGRAFICO=======================
# ===============================================================
from lexicography.lexer_rules import CoolLex
# BUILD THE LEXER
lexer = CoolLex()
lexer.build()
# ===============================================================
# ===============================================================
# =====================ANALISIS-SINTACTICO=======================
# ===============================================================
from lexicography.grammar_rules import CoolParse
# BUILD THE PARSER
parser = CoolParse(lexer)
parser.build()
program_ast = parser.parse(code)
# ===============================================================
# ===============================================================
# ======================ANALISIS-SEMANTICO=======================
# ===============================================================
from semantic.type_collector import TypeCollectorVisitor
from semantic.type_builder import TypeBuilderVisitor
from semantic.type_checker import TypeCheckerVisitor
# from semantic.ast_types_painter import Painter
typeCollector = TypeCollectorVisitor()
typeCollector.visit(program_ast)
typeBuilder = TypeBuilderVisitor(typeCollector.enviroment)
typeBuilder.visit(program_ast)
## CHECK SEMANTIC ERRORS IN THE ENVIROMENT(check_main, cycles and inheritance rules)
final_enviroment = typeBuilder.enviroment
final_enviroment.build_types_graph()
type_checker = TypeCheckerVisitor()
type_checker.visit(program_ast, typeBuilder.enviroment)
typed_ast = program_ast
# ast_painter = Painter()
# print(ast_painter.visit(typed_ast, 0))
# ===============================================================
# ===============================================================
# ========================CODE-GENERATION========================
# ===============================================================
# COOL --> CIL
from generation.cil.cil_generator import CilGeneratorVisitor
# from general.cil_hierarchy import get_formatter
cil_code_generator = CilGeneratorVisitor(typed_ast, typeBuilder.enviroment)
ast_cil = cil_code_generator.generate_code()
# cil_painter = get_formatter()
# print(cil_painter(ast_cil))
# CIL --> MIPS
from generation.mips.mips_writer import MIPSWriterVisitor
from operator import itemgetter
types_ids = typeBuilder.enviroment.types_dict
hierarchy = [0]*len(types_ids)
for _type in typeBuilder.enviroment.types_list[1:]:
hierarchy[types_ids[_type.name]] = types_ids[_type.parent]
# tag_names = sorted(types_ids.items(), key=itemgetter(1))
ast_cil.typesHierarchy = hierarchy
# ast_cil.tag_names = tag_names
mips_code_generator = MIPSWriterVisitor(ast_cil, fileout)
mips_code_generator.generate_Mips()
if __name__ == '__main__':
main()
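# Example invocation (illustrative paths):
#   python coolc.py -o build/hello.asm examples/hello.cl
# which lexes, parses, type-checks and emits MIPS assembly to build/hello.asm.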
| 34.423841 | 111 | 0.50808 | ["MIT"] | harry1911/CoolCompiler | src/coolc.py | 5198 | Python |
import os
from griddly import GymWrapperFactory, gd, GymWrapper
from griddly.RenderTools import VideoRecorder, RenderToFile
if __name__ == "__main__":
wrapper = GymWrapperFactory()
name = "projectiles_env"
current_path = os.path.dirname(os.path.realpath(__file__))
env = GymWrapper(
"health_bars.yaml",
shader_path="shaders",
player_observer_type=gd.ObserverType.SPRITE_2D,
global_observer_type=gd.ObserverType.SPRITE_2D,
level=0,
)
env.reset()
reset_global_obs = env.render(observer="global", mode="rgb_array")
reset_player_obs = env.render(mode="rgb_array")
render_to_file = RenderToFile()
render_to_file.render(reset_global_obs, "reset_global.png")
render_to_file.render(reset_player_obs, "reset_partial.png")
global_recorder = VideoRecorder()
global_visualization = env.render(observer="global", mode="rgb_array")
global_recorder.start("global_video_test.mp4", global_visualization.shape)
for i in range(1000):
obs, reward, done, info = env.step(env.action_space.sample())
env.render(observer="global")
frame = env.render(observer="global", mode="rgb_array")
global_recorder.add_frame(frame)
if done:
env.reset()
global_recorder.close()
| 30.418605 | 78 | 0.700306 | ["MIT"] | Thaigun/Griddly | python/examples/Custom Shaders/Health Bars/main.py | 1308 | Python |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
"""A pager for iterating through ``aggregated_list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``AggregatedList`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.TargetInstanceAggregatedList],
request: compute.AggregatedListTargetInstancesRequest,
response: compute.TargetInstanceAggregatedList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetInstanceAggregatedList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.AggregatedListTargetInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.TargetInstanceAggregatedList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[Tuple[str, compute.TargetInstancesScopedList]]:
for page in self.pages:
yield from page.items.items()
def get(self, key: str) -> Optional[compute.TargetInstancesScopedList]:
return self._response.items.get(key)
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.TargetInstanceList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.TargetInstanceList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.TargetInstanceList],
request: compute.ListTargetInstancesRequest,
response: compute.TargetInstanceList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListTargetInstancesRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetInstanceList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListTargetInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.TargetInstanceList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[compute.TargetInstance]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| 37.032258 | 89 | 0.675958 | ["Apache-2.0"] | Ctfbuster/python-compute | google/cloud/compute_v1/services/target_instances/pagers.py | 5740 | Python |
import argparse
import sys, os
import logging
from utils.misc import ArgParseDefault, add_bool_arg
USAGE_DESC = """
python main.py <command> [<args>]
Available commands:
init Initialize project
sync Sync project data from S3
parse Preprocessing of data to generate `/data/1_parsed`
sample Sample cleaned data to generate `data/2_sampled`
batch Creates a new batch of tweets from a sampled file in `/data/2_sampled`
clean_labels Clean labels generated from (`data/3_labelled`) and merge/clean to generate `/data/4_cleaned_labels`
stats Output various stats about project
split Splits data into training, dev and test data
prepare_predict Prepares parsed data for prediction with txcl
"""
STATS_USAGE_DESC = """
python main.py stats <command> [<args>]
Available commands:
all Run all
overview Show overview
sample Show sampling stats
annotation Show annotation summary
annotation_cleaned Show cleaned annotation summary
annotator_outliers Show annotator outliers
"""
class ArgParse(object):
def __init__(self):
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)-5.5s] [%(name)-12.12s]: %(message)s')
parser = ArgParseDefault(
description='',
usage=USAGE_DESC)
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
sys.exit(1)
getattr(self, args.command)()
def init(self):
from utils.task_helpers import init
parser = ArgParseDefault(description='Initialize project')
parser.add_argument('-p', '--project', type=str, required=False, default='', dest='project', help='Name of project to initialize')
parser.add_argument('--template', dest='template', action='store_true', default=False, help='Initialize project manually.')
args = parser.parse_args(sys.argv[2:])
init(args.project, args.template)
def sync(self):
from utils.task_helpers import sync
parser = ArgParseDefault(description='Sync project data from S3')
parser.add_argument('-s', '--source', choices=['all', 'streaming', 'annotation', 'media'], required=False, default='all', help='Type of data to be synced. By default sync all data belonging to this project.')
parser.add_argument('-l', '--last', required=False, type=int, help='Sync streaming data of last n days')
args = parser.parse_args(sys.argv[2:])
sync(data_type=args.source, last_n_days=args.last)
def parse(self):
import utils.processing.parse_tweets as parse_tweets
parser = ArgParseDefault(description='Preprocess raw data to create parquet files in `data/1_parsed`')
parser.add_argument('--no-parallel', dest='no_parallel', action='store_true', default=False, help='Do not run in parallel')
parser.add_argument('--extend', dest='extend', action='store_true', default=False, help='Extend existing parsed data')
parser.add_argument('--ray_num_cpus', type=int, default=None, help='Limit the number of worker processes for Ray during the memory intensive merge phase (by default using maximum worker processes)')
add_bool_arg(parser, 'extract_retweets', default=True, help='Extract top-level retweets')
add_bool_arg(parser, 'extract_quotes', default=True, help='Extract top-level quotes')
add_bool_arg(parser, 'omit_last_day', default=True, help='Omit parsing data from the last day')
args = parser.parse_args(sys.argv[2:])
parse_tweets.run(no_parallel=args.no_parallel, extract_retweets=args.extract_retweets, extract_quotes=args.extract_quotes, extend=args.extend, omit_last_day=args.omit_last_day, ray_num_cpus=args.ray_num_cpus)
def sample(self):
import utils.processing.sample_tweets as sample_tweets
parser = ArgParseDefault(description='Sample cleaned data to generate `data/2_sampled`')
parser.add_argument('-s', '--size', type=int, required=True, dest='size', help='Number of tweets to sample')
parser.add_argument('-bs', '--bin_size', type=int, required=False, help='Number of tweets per bin')
parser.add_argument('-m', '--mode', choices=['monthly', 'random'], required=False, default='random', help='Sampling mode. Random: Sample randomly. Monthly: Try to sample evenly within months.')
parser.add_argument('-l', '--langs', default=[], nargs='+', required=False, help='Filter by language(s)')
parser.add_argument('--contains_keywords', default=False, action='store_true', help='Only sample from tweets which include keywords')
parser.add_argument('--min_token_count', default=3, type=int, required=False, help='Minimum number of tokens')
parser.add_argument('--include_replies', default=False, action='store_true', help='Include replies')
parser.add_argument('--seed', type=int, required=False, default=None, help='Random state split')
parser.add_argument('--extend', action='store_true', help='Extending existing sample given by seed by removing already labelled tweets. If size is <= original sample size this has no effect except removing labelled tweets');
add_bool_arg(parser, 'anonymize', default=True, help='Replace usernames and URLs with filler (@user and <url>)')
parser.add_argument('--max_date', required=False, default=None, help='Sample until date (YYYY-MM-DD), default: No max')
parser.add_argument('--min_date', required=False, default=None, help='Sample from date (YYYY-MM-DD), default: No min')
args = parser.parse_args(sys.argv[2:])
sample_tweets.run(size=args.size, contains_keywords=args.contains_keywords, anonymize=args.anonymize, min_token_count=args.min_token_count, langs=args.langs, include_replies=args.include_replies, mode=args.mode, seed=args.seed, extend=args.extend, bin_size=args.bin_size, min_date=args.min_date, max_date=args.max_date)
def batch(self):
from utils.processing.sample_tweets import SampleGenerator
parser = ArgParseDefault(description='Generate new batch for labelling. As a result a new csv will be created in `data/2_sampled/batch_{batch_id}/`')
parser.add_argument('-N', '--num_tweets', type=int, default=None, help='The number of tweets to be generated in new batch')
parser.add_argument('-b', '--batch', type=int, default=None, help='The batch id to be generated, default: Automatically find next batch')
parser.add_argument('--ignore-previous', dest='ignore_previous', action='store_true', default=False, help='Also sample tweets from old batches which were not annotated')
parser.add_argument('--stats-only', dest='stats_only', action='store_true', default=False, help='Show stats only')
args = parser.parse_args(sys.argv[2:])
s = SampleGenerator()
if args.stats_only:
s.stats(ignore_previous=args.ignore_previous)
else:
s.generate_batch(num_tweets=args.num_tweets, batch_id=args.batch, ignore_previous=args.ignore_previous)
def clean_labels(self):
import utils.processing.clean_labels as clean_labels
parser = ArgParseDefault(description='Clean/merge labels from different batches to generate final training input')
parser.add_argument('-s', '--selection-criterion', dest='selection_criterion', choices=['majority', 'unanimous'], required=False, default='majority', help='Can be "majority" (use majority vote) or "unanimous" (only select tweets with perfect agreement)')
parser.add_argument('-l', '--min-labels-cutoff', dest='min_labels_cutoff', type=int, required=False, default=3, help='Discard all tweets having less than min_labels_cutoff annotations')
parser.add_argument('-a', '--selection-agreement', dest='selection_agreement', type=float, required=False, default=None, help='Consider only tweets with a certain level of annotation agreement. If provided overwrites selection_criterion param.')
parser.add_argument('-m', '--mode', choices=['mturk', 'local', 'public', 'other', 'all'], type=str, required=False, default='all', help='Annotation mode which was used. Can be `mturk`, `local`, `public`, `other` or `all`')
parser.add_argument('--is-relevant', dest='is_relevant', action='store_true', help='Filter tweets which have been annotated as relevant/related')
parser.add_argument('--exclude-incorrect', dest='exclude_incorrect', action='store_true', help='Remove annotations which have been manually flagged as incorrect')
parser.add_argument('--cutoff-worker-outliers', dest='cutoff_worker_outliers', type=float, default=None, help='Remove all annotations by workers who have agreement scores below certain Z-score threshold (a reasonable value would be 2 or 3)')
parser.add_argument('--allow-nan', dest='allow_nan', nargs='+', choices=['id', 'text', 'question_id', 'answer_id'], default=[], required=False, help='Allow certain fields to be NaN/empty (by default each annotation has to have the fields id, text, answer_id and question_id)')
parser.add_argument('--contains-keywords', dest='contains_keywords', default=False, action='store_true', help='Remove annotations in which text does not contain keywords')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='Verbose output')
args = parser.parse_args(sys.argv[2:])
clean_labels.run_clean_labels(args.selection_criterion, args.min_labels_cutoff, args.selection_agreement, args.mode, args.is_relevant, args.exclude_incorrect, args.cutoff_worker_outliers, args.allow_nan, args.contains_keywords, args.verbose)
def stats(self):
from utils.task_helpers import stats
parser = ArgParseDefault(description='Output various stats about project', usage=STATS_USAGE_DESC)
parser.add_argument('command', choices=['all', 'overview', 'sample', 'annotation', 'annotator_outliers', 'annotation_cleaned'], help='Subcommand to run')
args = parser.parse_args(sys.argv[2:3])
if args.command == 'annotation':
parser = ArgParseDefault(description='Print stats about annotations')
parser.add_argument('-m', '--mode', choices=['all', 'mturk', 'local', 'public', 'other', '*'], type=str, required=False, default='all', help='Print stats for certain annotation modes only.')
args = parser.parse_args(sys.argv[3:])
stats('annotation', **vars(args))
elif args.command == 'annotator_outliers':
parser = ArgParseDefault(description='Find annotators which have under-performed compared to others')
parser.add_argument('-m', '--mode', choices=['mturk', 'local', 'public', 'other'], type=str, required=False, default='mturk', help='Print stats for certain annotation modes only.')
parser.add_argument('-b', '--batch-name', type=str, required=False, dest='batch_name', default='*', help='Only analyse for specific local/mturk batch name (this looks for a pattern in filename). Default: All data')
parser.add_argument('--agreement-cutoff', dest='agreement_cutoff', type=float, required=False, default=3, help='Z-value cutoff for inter-worker agreement deviation')
parser.add_argument('--time-cutoff', dest='time_cutoff', type=float, required=False, default=3, help='Z-value cutoff for average task duration per worker')
parser.add_argument('--min-tasks', dest='min_tasks', type=int, required=False, default=3, help='Min tasks for worker to have completed before considered as outlier')
parser.add_argument('--min-comparisons-count', dest='min_comparisons_count', type=int, required=False, default=20, help='Min number of questions to compare for a worker needed to compute agreement score')
args = parser.parse_args(sys.argv[3:])
stats('annotator_outliers', **vars(args))
else:
stats(args.command)
def split(self):
from utils.task_helpers import train_dev_test_split
parser = ArgParseDefault(description='Split annotated data into training and test data set')
parser.add_argument('--question', type=str, required=False, default='sentiment', help='Which data to load (has to be a valid question tag)')
parser.add_argument('--name', type=str, required=False, default='', help='In case there are multiple cleaned labelled data output files give name of file (without csv ending), default: No name provided (works only if a single file is present).')
parser.add_argument('--balanced-labels', dest='balanced_labels', action='store_true', default=False, help='Ensure equal label balance')
parser.add_argument('--all-questions', dest='all_questions', action='store_true', default=False, help='Generate files for all available question tags. This overwrites the `question` argument. Default: False.')
parser.add_argument('--label-tags', dest='label_tags', required=False, default=[], nargs='+', help='Only select examples with certain label tags')
parser.add_argument('--has-label', dest='has_label', required=False, default='', help='Only select examples which have also been tagged with certain label')
parser.add_argument('--dev-size', dest='dev_size', type=float, required=False, default=0.2, help='Fraction of dev size')
parser.add_argument('--test-size', dest='test_size', type=float, required=False, default=0.2, help='Fraction of test size')
parser.add_argument('--seed', type=int, required=False, default=42, help='Random state split')
args = parser.parse_args(sys.argv[2:])
train_dev_test_split(question=args.question, dev_size=args.dev_size, test_size=args.test_size, seed=args.seed, name=args.name, balanced_labels=args.balanced_labels, all_questions=args.all_questions, label_tags=args.label_tags, has_label=args.has_label)
def prepare_predict(self):
from utils.task_helpers import prepare_predict
parser = ArgParseDefault(description='Prepare data for prediction with the text-classification library. \
This function generates two files (1 for text 1 for IDs/created_at) under data/other. The text.csv file can then be predicted.')
parser.add_argument('--start_date', required=False, default=None, help='Filter start date')
parser.add_argument('--end_date', required=False, default=None, help='Filter end date')
add_bool_arg(parser, 'anonymize', default=True, help='Replace usernames and URLs with filler (@user and <url>)')
parser.add_argument('--url_filler', required=False, default='<url>', help='Filler for urls (if anonymize)')
parser.add_argument('--user_filler', required=False, default='@user', help='Filler for user names (if anonymize)')
args = parser.parse_args(sys.argv[2:])
prepare_predict(args)
if __name__ == '__main__':
ArgParse()
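# Example invocations (illustrative; flags correspond to the parsers above):
#   python main.py init -p my_project
#   python main.py sync -s streaming -l 7
#   python main.py parse --extend
#   python main.py sample -s 10000 -m monthly -l en --min_token_count 3
#   python main.py batch -N 500
#   python main.py clean_labels -s majority -l 3 -m mturk
#   python main.py stats annotation -m mturk
#   python main.py split --question sentiment --dev-size 0.1 --test-size 0.2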
| 85.954545 | 327 | 0.708554 | ["MIT"] | crowdbreaks/preprocess | main.py | 15128 | Python |
import os
import subprocess
from tempfile import NamedTemporaryFile
from torch.distributed import get_rank
from torch.distributed import get_world_size
from torch.utils.data.sampler import Sampler
import librosa
import numpy as np
import scipy.signal
import torch
from scipy.io.wavfile import read
import math
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from .spec_augment import spec_augment
from hangul_utils import split_syllable_char, split_syllables, join_jamos
windows = {'hamming': scipy.signal.hamming, 'hann': scipy.signal.hann, 'blackman': scipy.signal.blackman,
'bartlett': scipy.signal.bartlett}
def load_audio(path):
# sample_rate, sound = read(path)
sound, sr = librosa.load(path, sr=16000)
# librosa.output.write_wav('org.wav', sound, sr)
# print('save 1')
# sound = sound.astype('float32') / 32767 # normalize audio
sound = librosa.util.normalize(sound) # normalize audio
sound = sound.astype('float32')
# librosa.output.write_wav('norm.wav', sound, sr)
# print('save 2')
if len(sound.shape) > 1:
if sound.shape[1] == 1:
sound = sound.squeeze()
else:
sound = sound.mean(axis=1) # multiple channels, average
return sound
class AudioParser(object):
def parse_transcript(self, transcript_path):
"""
:param transcript_path: Path where transcript is stored from the manifest file
:return: Transcript in training/testing format
"""
raise NotImplementedError
def parse_audio(self, audio_path):
"""
:param audio_path: Path where audio is stored from the manifest file
:return: Audio in training/testing format
"""
raise NotImplementedError
class NoiseInjection(object):
def __init__(self,
path=None,
sample_rate=16000,
noise_levels=(0, 0.5)):
"""
        Adds noise to an input signal with a specific SNR. The higher the noise level, the more noise is added.
Modified code from https://github.com/willfrey/audio/blob/master/torchaudio/transforms.py
"""
if path is not None and not os.path.exists(path):
print("Directory doesn't exist: {}".format(path))
raise IOError
self.paths = path is not None and librosa.util.find_files(path)
self.sample_rate = sample_rate
self.noise_levels = noise_levels
def inject_noise(self, data):
noise_path = np.random.choice(self.paths)
noise_level = np.random.uniform(*self.noise_levels)
return self.inject_noise_sample(data, noise_path, noise_level)
def inject_noise_sample(self, data, noise_path, noise_level):
noise_len = get_audio_length(noise_path)
data_len = len(data) / self.sample_rate
noise_start = np.random.rand() * (noise_len - data_len)
noise_end = noise_start + data_len
noise_dst = audio_with_sox(noise_path, self.sample_rate, noise_start, noise_end)
assert len(data) == len(noise_dst)
noise_energy = np.sqrt(noise_dst.dot(noise_dst) / noise_dst.size)
data_energy = np.sqrt(data.dot(data) / data.size)
data += noise_level * noise_dst * data_energy / noise_energy
return data
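# Sketch of the energy scaling above: the noise is rescaled so that, at
# noise_level=1.0, it carries the same RMS energy as the signal, and smaller
# noise levels mix it in proportionally. The 1-D arrays are illustrative.
def _example_energy_scaling() -> None:
    data = np.array([0.5, -0.5, 0.5, -0.5], dtype=np.float32)
    noise = np.array([2.0, -2.0, 2.0, -2.0], dtype=np.float32)
    noise_energy = np.sqrt(noise.dot(noise) / noise.size)  # 2.0
    data_energy = np.sqrt(data.dot(data) / data.size)      # 0.5
    mixed = data + 0.25 * noise * data_energy / noise_energy
    # The injected component has RMS 0.25 * 0.5 = 0.125.
    np.testing.assert_allclose(mixed, data + 0.125 * np.sign(noise))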
class SpectrogramParser(AudioParser):
def __init__(self, audio_conf, normalize=False, speed_volume_perturb=False, spec_augment=False):
"""
Parses audio file into spectrogram with optional normalization and various augmentations
:param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
:param normalize(default False): Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
"""
super(SpectrogramParser, self).__init__()
self.window_stride = audio_conf['window_stride']
self.window_size = audio_conf['window_size']
self.sample_rate = audio_conf['sample_rate']
self.window = windows.get(audio_conf['window'], windows['hamming'])
self.normalize = normalize
self.speed_volume_perturb = speed_volume_perturb
self.spec_augment = spec_augment
self.noiseInjector = NoiseInjection(audio_conf['noise_dir'], self.sample_rate,
audio_conf['noise_levels']) if audio_conf.get(
'noise_dir') is not None else None
self.noise_prob = audio_conf.get('noise_prob')
def parse_audio(self, audio_path,audio=None,change_speed=None):
if audio is not None:
y = audio
elif self.speed_volume_perturb:
y = load_randomly_augmented_audio(audio_path, self.sample_rate)
# librosa.output.write_wav('test.wav', y, sr=16000, norm=False)
# print('test')
else:
y = load_audio(audio_path)
# librosa.output.write_wav('y1.wav', y, sr=16000)
# print('save@@@@@@@@@@@@')
# change audio speed
if change_speed is not None:
y = librosa.effects.time_stretch(y, change_speed)
if self.noiseInjector:
add_noise = np.random.binomial(1, self.noise_prob)
if add_noise:
y = self.noiseInjector.inject_noise(y)
# librosa.output.write_wav('y2.wav', y, sr=16000)
# print('save@@@@@@@@@@@@')
# import sys
# sys.exit()
n_fft = int(self.sample_rate * self.window_size)
win_length = n_fft
hop_length = int(self.sample_rate * self.window_stride)
# STFT
D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=self.window)
spect, phase = librosa.magphase(D)
# S = log(S+1)
spect = np.log1p(spect)
spect = torch.FloatTensor(spect)
if self.normalize:
mean = spect.mean()
std = spect.std()
spect.add_(-mean)
spect.div_(std)
if self.spec_augment:
spect = spec_augment(spect)
return spect
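    # Worked example of the STFT sizing above, assuming a typical audio_conf of
    # sample_rate=16000, window_size=0.02 and window_stride=0.01 (illustrative
    # values): n_fft = win_length = 320 samples (20 ms), hop_length = 160
    # samples (10 ms), so a 1-second clip yields about 1 + 16000 // 160 = 101
    # frames with n_fft // 2 + 1 = 161 frequency bins each.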
def parse_transcript(self, transcript_path):
raise NotImplementedError
class SpectrogramDataset(Dataset, SpectrogramParser):
def __init__(self, audio_conf, manifest_filepath, labels, normalize=False, speed_volume_perturb=False, spec_augment=False):
"""
Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by
a comma. Each new line is a different sample. Example below:
/path/to/audio.wav,/path/to/audio.txt
...
:param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
:param manifest_filepath: Path to manifest csv as describe above
:param labels: String containing all the possible characters to map to
:param normalize: Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
"""
with open(manifest_filepath) as f:
ids = f.readlines()
ids = [x.strip().split(',') for x in ids]
self.ids = ids
self.size = len(ids)
self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
try:
self.use_jamo = audio_conf['use_jamo']
except:
self.use_jamo = False
super(SpectrogramDataset, self).__init__(audio_conf, normalize, speed_volume_perturb, spec_augment)
def __getitem__(self, index):
sample = self.ids[index]
audio_path, transcript_path = sample[0], sample[1]
spect = self.parse_audio(audio_path)
transcript = self.parse_transcript(transcript_path)
return spect, transcript
def parse_transcript(self, transcript_path):
with open(transcript_path, 'r', encoding='utf8') as transcript_file:
# with open(transcript_path, 'r', encoding='utf-16') as transcript_file:
transcript = transcript_file.read().replace('\n', '')
if self.use_jamo:
transcript = split_syllables(transcript)
transcript = list(filter(None, [self.labels_map.get(x) for x in list(transcript)]))
return transcript
def __len__(self):
return self.size
def _collate_fn(batch):
def func(p):
return p[0].size(1)
batch = sorted(batch, key=lambda sample: sample[0].size(1), reverse=True)
longest_sample = max(batch, key=func)[0]
freq_size = longest_sample.size(0)
minibatch_size = len(batch)
max_seqlength = longest_sample.size(1)
inputs = torch.zeros(minibatch_size, 1, freq_size, max_seqlength)
input_percentages = torch.FloatTensor(minibatch_size)
target_sizes = torch.IntTensor(minibatch_size)
targets = []
for x in range(minibatch_size):
sample = batch[x]
tensor = sample[0]
target = sample[1]
seq_length = tensor.size(1)
inputs[x][0].narrow(1, 0, seq_length).copy_(tensor)
input_percentages[x] = seq_length / float(max_seqlength)
target_sizes[x] = len(target)
targets.extend(target)
targets = torch.IntTensor(targets)
return inputs, targets, input_percentages, target_sizes
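# Minimal sketch of what _collate_fn produces for a hypothetical batch of two
# spectrograms with 161 frequency bins and 80 / 50 frames respectively.
def _example_collate() -> None:
    batch = [
        (torch.rand(161, 80), [1, 2, 3]),
        (torch.rand(161, 50), [4, 5]),
    ]
    inputs, targets, input_percentages, target_sizes = _collate_fn(batch)
    assert inputs.shape == (2, 1, 161, 80)      # zero-padded to the longest sample
    assert targets.tolist() == [1, 2, 3, 4, 5]  # flattened label sequences
    assert target_sizes.tolist() == [3, 2]
    assert input_percentages.tolist() == [1.0, 50 / 80]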
class AudioDataLoader(DataLoader):
def __init__(self, *args, **kwargs):
"""
Creates a data loader for AudioDatasets.
"""
super(AudioDataLoader, self).__init__(*args, **kwargs)
self.collate_fn = _collate_fn
class BucketingSampler(Sampler):
def __init__(self, data_source, batch_size=1):
"""
Samples batches assuming they are in order of size to batch similarly sized samples together.
"""
super(BucketingSampler, self).__init__(data_source)
self.data_source = data_source
ids = list(range(0, len(data_source)))
self.bins = [ids[i:i + batch_size] for i in range(0, len(ids), batch_size)]
def __iter__(self):
for ids in self.bins:
np.random.shuffle(ids)
yield ids
def __len__(self):
return len(self.bins)
def shuffle(self, epoch):
np.random.shuffle(self.bins)
class DistributedBucketingSampler(Sampler):
def __init__(self, data_source, batch_size=1, num_replicas=None, rank=None):
"""
Samples batches assuming they are in order of size to batch similarly sized samples together.
"""
super(DistributedBucketingSampler, self).__init__(data_source)
if num_replicas is None:
num_replicas = get_world_size()
if rank is None:
rank = get_rank()
self.data_source = data_source
self.ids = list(range(0, len(data_source)))
self.batch_size = batch_size
self.bins = [self.ids[i:i + batch_size] for i in range(0, len(self.ids), batch_size)]
self.num_replicas = num_replicas
self.rank = rank
self.num_samples = int(math.ceil(len(self.bins) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
offset = self.rank
# add extra samples to make it evenly divisible
bins = self.bins + self.bins[:(self.total_size - len(self.bins))]
assert len(bins) == self.total_size
samples = bins[offset::self.num_replicas] # Get every Nth bin, starting from rank
return iter(samples)
def __len__(self):
return self.num_samples
def shuffle(self, epoch):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(epoch)
bin_ids = list(torch.randperm(len(self.bins), generator=g))
self.bins = [self.bins[i] for i in bin_ids]
def get_audio_length(path):
output = subprocess.check_output(['soxi -D \"%s\"' % path.strip()], shell=True)
return float(output)
def audio_with_sox(path, sample_rate, start_time, end_time):
"""
    Crops and resamples the recording with sox and loads it.
"""
with NamedTemporaryFile(suffix=".wav") as tar_file:
tar_filename = tar_file.name
sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} trim {} ={} >/dev/null 2>&1".format(path, sample_rate,
tar_filename, start_time,
end_time)
os.system(sox_params)
y = load_audio(tar_filename)
return y
def augment_audio_with_sox(path, sample_rate, tempo, gain):
"""
Changes tempo and gain of the recording with sox and loads it.
"""
with NamedTemporaryFile(suffix=".wav") as augmented_file:
augmented_filename = augmented_file.name
sox_augment_params = ["tempo", "{:.3f}".format(tempo), "gain", "{:.3f}".format(gain)]
sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1".format(path, sample_rate,
augmented_filename,
" ".join(sox_augment_params))
os.system(sox_params)
y = load_audio(augmented_filename)
return y
# original tempo_range=(0.85,1.15)
# original gain_range=(-6,8)
def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85,1.15),
gain_range=(-6, 8)):
"""
    Picks tempo and gain uniformly, applies them to the utterance using the sox utility.
    Returns the augmented utterance.
"""
low_tempo, high_tempo = tempo_range
tempo_value = np.random.uniform(low=low_tempo, high=high_tempo)
low_gain, high_gain = gain_range
gain_value = np.random.uniform(low=low_gain, high=high_gain)
audio = augment_audio_with_sox(path=path, sample_rate=sample_rate,
tempo=tempo_value, gain=gain_value)
return audio
| 39.149051 | 127 | 0.631524 | ["MIT"] | ShuanDeMorian/deepspeech.pytorch | data/data_loader.py | 14446 | Python |
# -*- coding: utf8 -*-
def filter_event(event, happening_before):
"""Check if the following keys are present. These
keys only show up when using the API. If fetching
from the iCal, JSON, or RSS feeds it will just compare
the dates
"""
status = True
visibility = True
actions = True
if 'status' in event:
status = event['status'] == 'upcoming'
if 'visibility' in event:
visibility = event['visibility'] == 'public'
if 'self' in event:
actions = 'announce' not in event['self']['actions']
return (status and visibility and actions and
event['time'] < happening_before)
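# Illustrative example (field values are hypothetical): an upcoming, public
# event that has not been announced yet and starts before the cutoff passes
# the filter.
def _example_filter():
    event = {
        'status': 'upcoming',
        'visibility': 'public',
        'self': {'actions': ['edit']},
        'time': 1500000000000,
    }
    return filter_event(event, happening_before=1600000000000)  # True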
| 28.304348 | 60 | 0.6298 | ["MIT"] | OpenTwinCities/site_bot | app/Meetup/Filter.py | 651 | Python |
import csv
from pathlib import Path
import torch
import pandas
import numpy as np
from utils import peek, load_json, dump_json
from .module import ContrastiveModule
from mps import distributed as du
from save import format_rows
def get_penultimates(keys):
penultimates = {}
for key in keys:
view = key[:key.find('_')] # get dataset+model name
layer_name = key[key.find('_') + 1:]
if view not in penultimates:
penultimates[view] = view + '_' + layer_name
elif layer_name > penultimates[view]:
penultimates[view] = view + '_' + layer_name
keys = sorted(list(penultimates.keys()))
return [penultimates[k] for k in keys]
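# Sketch of get_penultimates on hypothetical feature keys: for each view (the
# prefix before the first "_") it keeps the lexicographically last layer name,
# which with the layer_1..layer_4 naming used below is the penultimate feature.
def _example_get_penultimates() -> list:
    keys = ['video_layer_1', 'video_layer_4', 'audio_layer_2', 'audio_layer_4']
    return get_penultimates(keys)  # ['audio_layer_4', 'video_layer_4']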
def get_optimizer(params, lr=1e-3):
optimizer = torch.optim.AdamW(
params,
lr=lr,
betas=(0.9, 0.999),
eps=1e-6,
amsgrad=True,
)
return optimizer
def set_lr(optimizer, lr):
for param in optimizer.param_groups:
param['lr'] = lr
return optimizer
def lr_func_linear(current_step, num_training_steps, num_warmup_steps=3):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
def update_lr(optimizer, epoch, num_epochs, base_lr=1e-3, num_warmup_steps=3):
lr = lr_func_linear(epoch + 1, num_epochs + 1, num_warmup_steps) * base_lr
optimizer = set_lr(optimizer, lr)
return optimizer, lr
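# Worked example of the schedule above with base_lr=1e-3, num_epochs=10 and
# num_warmup_steps=3: the multiplier rises linearly to 1.0 during warm-up and
# then decays linearly toward 0.
def _example_lr_schedule() -> list:
    base_lr, num_epochs, warmup = 1e-3, 10, 3
    lrs = [round(lr_func_linear(e + 1, num_epochs + 1, warmup) * base_lr, 6)
           for e in range(num_epochs)]
    # lrs -> [0.000333, 0.000667, 0.001, 0.000875, 0.00075, ..., 0.000125]
    return lrs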
class Contrastive:
def __init__(self, num_epochs=1, device='cpu', base_lr=1e-4,
num_warmup_steps=3, distributed=False):
self.num_epochs = num_epochs
self.device = device
self.base_lr = base_lr
self.num_warmup_steps = num_warmup_steps
self.distributed = distributed
self.epoch = 0
# sizes = self.get_sizes(train)
sizes = self.default_sizes
self.model = ContrastiveModule(*sizes, use_global_batch=distributed)
self.model = self.model.to(self.device)
def init(self, clustering_combinations, candidates):
pass
@property
def default_sizes(self):
# video (slowfast) : 2304, audio (VGGish) : 128
return [2304, 128]
def get_sizes(self, train):
class_data = peek(train)
row = class_data[0]
penultimates = get_penultimates(list(row['features'].keys()))
return [row['features'][k].shape[-1] for k in penultimates]
def get_feature_names(self, train):
class_data = peek(train)
row = peek(class_data)
return sorted(list(row.keys()))
def train_batch(self, batch, optimizer):
moved = []
for feature in batch:
moved.append(feature.to(self.device))
loss, acc = self.model(*moved)
loss.backward()
if self.distributed:
self.model.average_gradient()
optimizer.step()
return loss.item(), acc.item()
def _get_features(self, batch):
unique_ids = pandas.Series(batch['idx']).drop_duplicates().index.tolist()
filenames = [batch['filename'][idx] for idx in unique_ids]
ids = [batch['idx'][idx] for idx in unique_ids]
shard_names = [batch['shard_name'][idx] for idx in unique_ids]
metas = [{'id': idx, 'filename': filename, 'shard_name': shard_name}
for idx, filename, shard_name in zip(ids, filenames, shard_names)]
video_features = batch['SLOWFAST_8x8_R50/kinetics-400']['layer_4']
audio_features = batch['VGGish/YouTube-8M']['layer_4']
unique_ids = torch.Tensor(unique_ids).long()
video_features = video_features.index_select(dim=0, index=unique_ids)
audio_features = audio_features.index_select(dim=0, index=unique_ids)
return metas, [video_features, audio_features]
def get_features(self, batch):
metas, [video_features, audio_features] = self._get_features(batch)
if self.distributed:
i = du.get_rank()
total = du.get_world_size()
metas = metas[i::total]
video_features = video_features[i::total]
audio_features = audio_features[i::total]
return metas, [video_features, audio_features]
def train(self, args, path, dataloader, log_every=1, verbose=True):
self.model.train()
optimizer = get_optimizer(self.model.parameters(), self.base_lr)
for epoch in range(self.epoch, self.num_epochs):
optimizer, lr = update_lr(optimizer, epoch, self.num_epochs, self.base_lr,
self.num_warmup_steps)
epoch_loss = []
epoch_acc = []
pbar = dataloader
for count, batch in enumerate(pbar):
_, features = self.get_features(batch)
loss, acc = self.train_batch(features, optimizer)
epoch_loss.append(loss)
epoch_acc.append(acc)
if verbose and count % log_every == 0:
print("(node {}) training epoch ({}/{}) iter ({}/{}) (lr: {:04f}, loss: {:04f}, acc: {:04f})".format(
du.get_rank(), epoch, self.num_epochs, count, len(dataloader), lr, loss, acc))
epoch_loss = np.array(epoch_loss).mean()
epoch_acc = np.array(epoch_acc).mean()
if verbose:
print("(node {}) epoch ({}/{}) done (lr: {:04f}, loss: {:04f}, acc: {:04f})".format(
du.get_rank(), epoch, self.num_epochs, lr, epoch_loss, epoch_acc))
self.epoch = epoch
self.save_cache(args, path, epoch, verbose)
return
def get_cache_path_run(self, args, epoch):
cache_dir = args.data.output.path.parent / 'caches'
cache_dir.mkdir(parents=True, exist_ok=True)
pid = args.parent_pid
rank = args.node_rank
i = args.chunk_num
name = "contrastive_model_cache_epoch_{}_{}_{}_{}.pkl".format(epoch, pid, rank, i)
path = str(cache_dir / name)
key_name = "contrastive_model_cache_epoch_{}_{}_{}_{}.json".format(epoch, pid, rank, i)
key_path = str(cache_dir / key_name)
return path, key_path
def get_cache_path_load(self, args, path, epoch):
cache_dir = args.data.output.path.parent / 'caches'
cache_dir.mkdir(parents=True, exist_ok=True)
keys = list(cache_dir.glob("contrastive_model_cache_epoch_{}_*.json".format(epoch)))
if len(keys) == 0:
return None
keys = {p.stem: set(load_json(p)) for p in keys}
path = set([Path(p).stem for p in path])
intersections = [(k, len(v & path)) for k, v in keys.items() if len(path - v) == 0]
if len(intersections) == 0:
return None
key = max(intersections, key=lambda x: x[1])[0]
path = cache_dir / key
path = path.parent / (path.stem + '.pkl')
return path
def save_cache(self, args, chunks, epoch, verbose=True):
path, key_path = self.get_cache_path_run(args, epoch)
dt = {
'epoch': self.epoch,
'base_lr': self.base_lr,
'model': self.model.state_dict()
}
if verbose:
print("saved cache file: {}".format(Path(path).stem))
torch.save(dt, path)
keys = [Path(p).stem for p in chunks]
dump_json(keys, key_path)
def load_cache(self, args, path, epoch):
path = self.get_cache_path_load(args, path, epoch)
assert path is not None, 'no cache file'
dt = torch.load(path)
self.epoch = dt['epoch']
self.base_lr = dt['base_lr']
self.model.load_state_dict(dt['model'])
def infer_batch(self, batch):
moved = []
for feature in batch:
moved.append(feature.to(self.device))
logits = self.model.infer(*moved)
return logits.detach().cpu()
def infer(self, args, dataloader, json_metas, subset_size, log_every=1, verbose=True):
self.model.eval()
with torch.no_grad():
logits, filename_ids = self._infer(args, dataloader, json_metas, log_every, verbose)
if subset_size > logits.shape[0]:
subset_size = logits.shape[0]
scores, ids = logits.topk(subset_size, sorted=True)
return scores, ids, filename_ids
def _infer(self, args, dataloader, json_metas, log_every=1, verbose=True):
logits = []
pbar = dataloader
metas = []
for count, batch in enumerate(pbar):
batch_metas, features = self.get_features(batch)
logit = self.infer_batch(features)
logits.append(logit)
metas.extend(batch_metas)
if verbose and count % log_every == 0:
print("inference iter ({}/{}) saving caches".format(count, len(dataloader)))
logits = torch.cat(logits, dim=0)
self.save_inference(args, logits, metas, json_metas)
logits = []
metas = []
if len(metas) > 0:
logits = torch.cat(logits, dim=0)
self.save_inference(args, logits, metas, json_metas)
print("done: inference iter ({}/{}) saving caches".format(count, len(dataloader)))
return logits, metas
def save_inference(self, args, logits, metas, json_metas):
cache_dir = args.data.output.path.parent / 'caches'
cache_dir.mkdir(parents=True, exist_ok=True)
pid = args.parent_pid
local_rank = du.get_rank()
output_name = Path(args.data.output.path).stem
name = "{}_contrastive_inferred_cache_{}_{}.csv".format(output_name, pid, local_rank)
scores = logits.numpy().tolist()
rows = [{'score': score, **v} for score, v in zip(scores, metas)]
lines = format_rows(rows, json_metas, sharded_meta=True,
headers=['score', 'shard_name', 'filename', 'id', 'segment'])
print("saving cache to {}".format(cache_dir / name))
with open(cache_dir / name, 'a+') as f:
writer = csv.writer(f)
for line in lines:
writer.writerow(line)
| 39.692607 | 121 | 0.605921 | [
"MIT"
] | JiwanChung/acav100m | subset_selection/code/measures/contrastive/contrastive.py | 10,201 | Python |
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test pricecoind with different proxy configuration.
Test plan:
- Start pricecoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on pricecoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create pricecoinds that connect to them
- Manipulate the pricecoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:9333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:9333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.366337 | 121 | 0.625299 | [
"MIT"
] | barrystyle/Pricecoin | test/functional/feature_proxy.py | 8,356 | Python |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 Showa Denko Materials co., Ltd. All rights reserved.
This software is for non-profit use only.
THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN THIS SOFTWARE.
"""
import time
import numpy as np
from GPyOpt.core.task.objective import Objective
class MultiObjective(Objective):
"""
Class to handle problems with multiple objective functions.
param func: objective function.
param n_obj: number of objective functions
param num_cores: number of cores to use in the process of evaluating the objective (default, 1).
param objective_name: name of the objective function.
param batch_type: Type of batch used. Only 'synchronous' evaluations are possible at the moment.
param space: Not in use.
"""
def __init__(self, func, n_obj, num_cores = 1, objective_name = 'no_name', batch_type = 'synchronous', space = None):
self.func = func
self.n_procs = num_cores
self.num_evaluations = 0
self.space = space
self.objective_name = objective_name
self.n_obj = n_obj
def evaluate(self, x):
"""
Performs the evaluation of the objective at x.
"""
f_evals, cost_evals = self._eval_func(x)
return f_evals, cost_evals
def _eval_func(self, x):
"""
Performs sequential evaluations of the function at x (single location or batch). The computing time of each
evaluation is also provided.
"""
cost_evals = []
f_evals = np.empty(shape=[0, self.n_obj])
for i in range(x.shape[0]):
st_time = time.time()
rlt = self.func(np.atleast_2d(x[i]))
f_evals = np.vstack([f_evals,rlt])
cost_evals += [time.time()-st_time]
return f_evals, cost_evals
| 34.875 | 121 | 0.673835 | [
"MIT",
"Unlicense"
] | wilsongis/3DP_Experiments | Samples/codes/matopt_review/add_objective.py | 2,232 | Python |
"""
Given a rod of length n inches and an array of prices
that includes prices of all pieces of size up to and including n.
Determine the maximum value obtainable by cutting up the rod and
selling the pieces. For example, if the length of the rod is 8
and the values of different pieces are given as the following,
then the maximum obtainable value is 22 (by cutting in two pieces of lengths 2 and 6)
length | 1 2 3 4 5 6 7 8
--------------------------------------------
price | 1 5 8 9 10 17 17 20
In the unbounded knapsack there is only one change compared to the 0/1 knapsack, i.e.
****dp[i][j-wt[n-1]]****
wt arr => len arr
val arr => price arr
W => L
"""
def RodCutting(larr, parr, L):
n = len(larr)
dp = [[0 for j in range(L+1)]for i in range(n+1)]
for i in range(1, n+1):
for j in range(1, L+1):
if larr[i-1] <= j:
dp[i][j] = max(parr[i-1]+dp[i][j-larr[i-1]], dp[i-1][j])
else:
dp[i][j] = dp[i-1][j]
print(dp)
return dp[n][L]
print(RodCutting([1, 2, 3, 4, 5, 6, 7, 8], [9, 5, 8, 9, 10, 17, 17, 20], 8))
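# Sanity check of the docstring example (a minimal illustration, not part of the
# original script): with prices [1, 5, 8, 9, 10, 17, 17, 20] the best value for a
# rod of length 8 is 22 (pieces of length 2 and 6).
assert RodCutting([1, 2, 3, 4, 5, 6, 7, 8], [1, 5, 8, 9, 10, 17, 17, 20], 8) == 22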
| 27.463415 | 86 | 0.55595 | [
"MIT"
] | Saicharan67/Interview-Coding-Questions | DynamicProgramming/UnBoundedKnapSack/RodCutting.py | 1,126 | Python |
"""Classes for validating data passed to the annotations API."""
import copy
import colander
from dateutil.parser import parse
from pyramid import i18n
from h.schemas.base import JSONSchema, ValidationError
from h.search.query import LIMIT_DEFAULT, LIMIT_MAX, OFFSET_MAX
from h.search.util import wildcard_uri_is_valid
from h.util import document_claims
_ = i18n.TranslationStringFactory(__package__)
def _validate_wildcard_uri(node, value):
"""Raise if wildcards are within the domain of the uri."""
for val in value:
if not wildcard_uri_is_valid(val):
raise colander.Invalid(
node,
"""Wildcards (_ and *) are not permitted within the
domain of wildcard_uri""",
)
class AnnotationSchema(JSONSchema):
"""Validate an annotation object."""
schema = {
"type": "object",
"properties": {
"document": {
"type": "object",
"properties": {
"dc": {
"type": "object",
"properties": {
"identifier": {"type": "array", "items": {"type": "string"}}
},
},
"highwire": {
"type": "object",
"properties": {
"doi": {"type": "array", "items": {"type": "string"}},
"pdf_url": {"type": "array", "items": {"type": "string"}},
},
},
"link": {
"type": "array",
"items": {
"type": "object",
"properties": {
"href": {"type": "string"},
"type": {"type": "string"},
},
"required": ["href"],
},
},
},
},
"group": {"type": "string"},
"permissions": {
"title": "Permissions",
"description": "Annotation action access control list",
"type": "object",
"patternProperties": {
"^(admin|delete|read|update)$": {
"type": "array",
"items": {"type": "string", "pattern": "^(acct:|group:).+$"},
}
},
"required": ["read"],
},
"references": {"type": "array", "items": {"type": "string"}},
"tags": {"type": "array", "items": {"type": "string"}},
"target": {
"type": "array",
"items": {
"type": "object",
"properties": {
"selector": {
"type": "array",
"items": {
"type": "object",
"properties": {"type": {"type": "string"}},
"required": ["type"],
},
}
},
},
},
"text": {"type": "string"},
"uri": {"type": "string"},
},
}
class CreateAnnotationSchema:
"""Validate the POSTed data of a create annotation request."""
def __init__(self, request):
self.structure = AnnotationSchema()
self.request = request
def validate(self, data):
appstruct = self.structure.validate(data)
new_appstruct = {}
_remove_protected_fields(appstruct)
new_appstruct["userid"] = self.request.authenticated_userid
uri = appstruct.pop("uri", "").strip()
if not uri:
raise ValidationError("uri: " + _("'uri' is a required property"))
new_appstruct["target_uri"] = uri
new_appstruct["text"] = appstruct.pop("text", "")
new_appstruct["tags"] = appstruct.pop("tags", [])
new_appstruct["groupid"] = appstruct.pop("group", "__world__")
new_appstruct["references"] = appstruct.pop("references", [])
if "permissions" in appstruct:
new_appstruct["shared"] = _shared(
appstruct.pop("permissions"), new_appstruct["groupid"]
)
else:
new_appstruct["shared"] = False
if "target" in appstruct:
new_appstruct["target_selectors"] = _target_selectors(
appstruct.pop("target")
)
# Replies always get the same groupid as their parent. The parent's
# groupid is added to the reply annotation later by the storage code.
# Here we just delete any group sent by the client from replies.
if new_appstruct["references"] and "groupid" in new_appstruct:
del new_appstruct["groupid"]
new_appstruct["document"] = _document(
appstruct.pop("document", {}), new_appstruct["target_uri"]
)
new_appstruct["extra"] = appstruct
return new_appstruct
class UpdateAnnotationSchema:
"""Validate the POSTed data of an update annotation request."""
def __init__(self, request, existing_target_uri, groupid):
self.request = request
self.existing_target_uri = existing_target_uri
self.groupid = groupid
self.structure = AnnotationSchema()
def validate(self, data):
appstruct = self.structure.validate(data)
new_appstruct = {}
_remove_protected_fields(appstruct)
# Some fields are not allowed to be changed in annotation updates.
for key in ["group", "groupid", "userid", "references"]:
appstruct.pop(key, "")
# Fields that are allowed to be updated and that have a different name
# internally than in the public API.
if "uri" in appstruct:
new_uri = appstruct.pop("uri").strip()
if not new_uri:
raise ValidationError("uri: " + _("'uri' is a required property"))
new_appstruct["target_uri"] = new_uri
if "permissions" in appstruct:
new_appstruct["shared"] = _shared(
appstruct.pop("permissions"), self.groupid
)
if "target" in appstruct:
new_appstruct["target_selectors"] = _target_selectors(
appstruct.pop("target")
)
# Fields that are allowed to be updated and that have the same internal
# and external name.
for key in ["text", "tags"]:
if key in appstruct:
new_appstruct[key] = appstruct.pop(key)
if "document" in appstruct:
new_appstruct["document"] = _document(
appstruct.pop("document"),
new_appstruct.get("target_uri", self.existing_target_uri),
)
new_appstruct["extra"] = appstruct
return new_appstruct
def _document(document, claimant):
"""
Return document meta and document URI data from the given document dict.
Transforms the "document" dict that the client posts into a convenient
format for creating DocumentURI and DocumentMeta objects later.
"""
document = document or {}
document_uri_dicts = document_claims.document_uris_from_data(
copy.deepcopy(document), claimant=claimant
)
document_meta_dicts = document_claims.document_metas_from_data(
copy.deepcopy(document), claimant=claimant
)
return {
"document_uri_dicts": document_uri_dicts,
"document_meta_dicts": document_meta_dicts,
}
def _format_jsonschema_error(error):
"""Format a :py:class:`jsonschema.ValidationError` as a string."""
if error.path:
dotted_path = ".".join([str(c) for c in error.path])
return "{path}: {message}".format(path=dotted_path, message=error.message)
return error.message
def _remove_protected_fields(appstruct):
# Some fields are not to be set by the user, ignore them.
for field in [
"created",
"updated",
"user",
"id",
"links",
"flagged",
"hidden",
"moderation",
"user_info",
]:
appstruct.pop(field, None)
def _shared(permissions, groupid):
"""
Return True if the given permissions object represents shared permissions.
Return False otherwise.
Reduces the client's complex permissions dict to a simple shared boolean.
:param permissions: the permissions dict sent by the client in an
annotation create or update request
:type permissions: dict
:param groupid: the groupid of the annotation that the permissions dict
applies to
:type groupid: unicode
"""
return permissions["read"] == ["group:{id}".format(id=groupid)]
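# Illustrative example (hypothetical ids): _shared({"read": ["group:abc123"]}, "abc123")
# returns True, while a read ACL that names only the author's acct: principal
# returns False.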
def _target_selectors(targets):
"""
Return the target selectors from the given target list.
Transforms the target lists that the client sends in annotation create and
update requests into our internal target_selectors format.
"""
# Any targets other than the first in the list are discarded.
# Any fields of the target other than 'selector' are discarded.
if targets and "selector" in targets[0]:
return targets[0]["selector"]
return []
class SearchParamsSchema(colander.Schema):
_separate_replies = colander.SchemaNode(
colander.Boolean(),
missing=False,
description="Return a separate set of annotations and their replies.",
)
sort = colander.SchemaNode(
colander.String(),
validator=colander.OneOf(["created", "updated", "group", "id", "user"]),
missing="updated",
description="The field by which annotations should be sorted.",
)
search_after = colander.SchemaNode(
colander.String(),
missing=colander.drop,
description="""Returns results after the annotation who's sort field
has this value. If specifying a date use the format
yyyy-MM-dd'T'HH:mm:ss.SSX or time in miliseconds since the
epoch. This is used for iteration through large collections
of results.""",
)
limit = colander.SchemaNode(
colander.Integer(),
validator=colander.Range(min=0, max=LIMIT_MAX),
missing=LIMIT_DEFAULT,
description="The maximum number of annotations to return.",
)
order = colander.SchemaNode(
colander.String(),
validator=colander.OneOf(["asc", "desc"]),
missing="desc",
description="The direction of sort.",
)
offset = colander.SchemaNode(
colander.Integer(),
validator=colander.Range(min=0, max=OFFSET_MAX),
missing=0,
description="""The number of initial annotations to skip. This is
used for pagination. Not suitable for paging through
thousands of annotations-search_after should be used
instead.""",
)
group = colander.SchemaNode(
colander.String(),
missing=colander.drop,
description="Limit the results to this group of annotations.",
)
quote = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="""Limit the results to annotations that contain this text inside
the text that was annotated.""",
)
references = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="""Returns annotations that are replies to this parent annotation id.""",
)
tag = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="Limit the results to annotations tagged with the specified value.",
)
tags = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="Alias of tag.",
)
text = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="Limit the results to annotations that contain this text in their textual body.",
)
uri = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="""Limit the results to annotations matching the specific URI
or equivalent URIs. URI can be a URL (a web page address) or
a URN representing another kind of resource such as DOI
(Digital Object Identifier) or a PDF fingerprint.""",
)
uri_parts = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
name="uri.parts",
missing=colander.drop,
description="""Limit the results to annotations with the given keyword
appearing in the URL.""",
)
url = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="Alias of uri.",
)
wildcard_uri = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
validator=_validate_wildcard_uri,
missing=colander.drop,
description="""
Limit the results to annotations matching the wildcard URI.
URI can be a URL (a web page address) or a URN representing another
kind of resource such as DOI (Digital Object Identifier) or a
PDF fingerprint.
`*` will match any character sequence (including an empty one),
and a `_` will match any single character. Wildcards are only permitted
within the path and query parts of the URI.
Escaping wildcards is not supported.
Examples of valid uris":" `http://foo.com/*` `urn:x-pdf:*` `file://localhost/_bc.pdf`
Examples of invalid uris":" `*foo.com` `u_n:*` `file://*` `http://foo.com*`
""",
)
any = colander.SchemaNode(
colander.Sequence(),
colander.SchemaNode(colander.String()),
missing=colander.drop,
description="""Limit the results to annotations whose quote, tags,
text or url fields contain this keyword.""",
)
user = colander.SchemaNode(
colander.String(),
missing=colander.drop,
description="Limit the results to annotations made by the specified user.",
)
def validator(self, node, cstruct):
sort = cstruct["sort"]
search_after = cstruct.get("search_after", None)
if search_after:
if sort in ["updated", "created"] and not self._date_is_parsable(
search_after
):
raise colander.Invalid(
node,
"""search_after must be a parsable date in the form
yyyy-MM-dd'T'HH:mm:ss.SSX
or time in miliseconds since the epoch.""",
)
# offset must be set to 0 if search_after is specified.
cstruct["offset"] = 0
def _date_is_parsable(self, value):
"""Return True if date is parsable and False otherwise."""
# Dates like "2017" can also be cast as floats so if a number is less
# than 9999 it is assumed to be a year and not ms since the epoch.
try:
if float(value) < 9999:
raise ValueError("This is not in the form ms since the epoch.")
except ValueError:
try:
parse(value)
except ValueError:
return False
return True
| 35.168122 | 101 | 0.562488 | [
"BSD-2-Clause"
] | bibliotechie/h | h/schemas/annotation.py | 16,107 | Python |
import tensorflow as tf
import tensorflow_zero_out
import numpy as np
import os
# Create a model using low-level tf.* APIs
class ZeroOut(tf.Module):
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.int32)])
def __call__(self, x):
return tensorflow_zero_out.zero_out(x)
model = ZeroOut()
# (to run your model) result = model(tf.constant([1, 2, 3], dtype=tf.int32))  # zeroes all but the first element
# (to generate a SavedModel) tf.saved_model.save(model, "saved_model_tf_dir")
concrete_func = model.__call__.get_concrete_function()
# Convert the model.
# Notes that for the versions earlier than TensorFlow 2.7, the
# from_concrete_functions API is able to work when there is only the first
# argument given:
# > converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func],
)
tflite_model = converter.convert()
# Save the model.
with open('model.tflite', 'wb') as f:
f.write(tflite_model) | 38 | 80 | 0.722222 | [
"Apache-2.0"
] | yuko29/TF_custom_op | tensorflow_zero_out/python/ops/convert_to_tflite.py | 1,026 | Python |
# -*- coding: utf-8 -*-
#
# Author: oldj
# Email: [email protected]
# Blog: http://oldj.net
#
import os
import re
import StringIO
from PIL import Image
from PIL import ImageDraw
import pygame
g_script_folder = os.path.dirname(os.path.abspath(__file__))
g_fonts_folder = os.path.join(g_script_folder, "fonts")
g_re_first_word = re.compile((u""
+ u"(%(prefix)s+\S%(postfix)s+)" # 标点
+ u"|(%(prefix)s*\w+%(postfix)s*)" # 单词
+ u"|(%(prefix)s+\S)|(\S%(postfix)s+)" # 标点
+ u"|(\d+%%)" # 百分数
) % {
"prefix": u"['\"\(<\[\{‘“(《「『]",
"postfix": u"[:'\"\)>\]\}:’”)》」』,;\.\?!,、;。?!]",
})
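# Rough behaviour of the tokenizer built on this pattern (assumed example):
# makeLineToWordsList(u"Hello, world!") yields [u"Hello,", u" ", u"world!"] --
# words keep their trailing punctuation, and unmatched characters such as
# spaces become single-character tokens.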
pygame.init()
def getFontForPyGame(font_name="wqy-zenhei.ttc", font_size=14):
return pygame.font.Font(os.path.join(g_fonts_folder, font_name), font_size)
def makeConfig(cfg=None):
if not cfg or type(cfg) != dict:
cfg = {}
default_cfg = {
"width": 440, # px
"padding": (15, 18, 20, 18),
"line-height": 20, #px
"title-line-height": 32, #px
"font-size": 14, # px
"title-font-size": 24, # px
"font-family": "wqy-zenhei.ttc",
# "font-family": "msyh.ttf",
"font-color": (0, 0, 0),
"font-antialiasing": True, # 字体是否反锯齿
"background-color": (255, 255, 255),
"border-size": 1,
"border-color": (192, 192, 192),
"copyright": u"本图文由 txt2.im 自动生成,但不代表 txt2.im 赞同其内容或立场。",
"copyright-center": False, # 版权信息居中显示,如为 False 则居左显示
"first-line-as-title": True,
"break-word": False,
}
default_cfg.update(cfg)
return default_cfg
def makeLineToWordsList(line, break_word=False):
u"""将一行文本转为单词列表"""
if break_word:
return [c for c in line]
lst = []
while line:
ro = g_re_first_word.match(line)
end = 1 if not ro else ro.end()
lst.append(line[:end])
line = line[end:]
return lst
def makeLongLineToLines(long_line, start_x, start_y, width, line_height, font, cn_char_width=0):
u"""将一个长行分成多个可显示的短行"""
txt = long_line
# txt = u"测试汉字abc123"
# txt = txt.decode("utf-8")
if not txt:
return [None]
words = makeLineToWordsList(txt)
lines = []
if not cn_char_width:
cn_char_width, h = font.size(u"汉")
avg_char_per_line = width / cn_char_width
if avg_char_per_line <= 1:
avg_char_per_line = 1
line_x = start_x
line_y = start_y
while words:
tmp_words = words[:avg_char_per_line]
tmp_ln = "".join(tmp_words)
w, h = font.size(tmp_ln)
wc = len(tmp_words)
while w < width and wc < len(words):
wc += 1
tmp_words = words[:wc]
tmp_ln = "".join(tmp_words)
w, h = font.size(tmp_ln)
while w > width and len(tmp_words) > 1:
tmp_words = tmp_words[:-1]
tmp_ln = "".join(tmp_words)
w, h = font.size(tmp_ln)
if w > width and len(tmp_words) == 1:
            # handle a long word or a long number
line_y = makeLongWordToLines(
tmp_words[0], line_x, line_y, width, line_height, font, lines
)
words = words[len(tmp_words):]
continue
line = {
"x": line_x,
"y": line_y,
"text": tmp_ln,
"font": font,
}
line_y += line_height
words = words[len(tmp_words):]
lines.append(line)
if len(lines) >= 1:
            # strip leading whitespace from the second and later wrapped lines
while len(words) > 0 and not words[0].strip():
words = words[1:]
return lines
def makeLongWordToLines(long_word, line_x, line_y, width, line_height, font, lines):
if not long_word:
return line_y
c = long_word[0]
char_width, char_height = font.size(c)
default_char_num_per_line = width / char_width
while long_word:
tmp_ln = long_word[:default_char_num_per_line]
w, h = font.size(tmp_ln)
l = len(tmp_ln)
while w < width and l < len(long_word):
l += 1
tmp_ln = long_word[:l]
w, h = font.size(tmp_ln)
while w > width and len(tmp_ln) > 1:
tmp_ln = tmp_ln[:-1]
w, h = font.size(tmp_ln)
l = len(tmp_ln)
long_word = long_word[l:]
line = {
"x": line_x,
"y": line_y,
"text": tmp_ln,
"font": font,
}
line_y += line_height
lines.append(line)
return line_y
def makeMatrix(txt, font, title_font, cfg):
width = cfg["width"]
data = {
"width": width,
"height": 0,
"lines": [],
}
a = txt.split("\n")
cur_x = cfg["padding"][3]
cur_y = cfg["padding"][0]
cn_char_width, h = font.size(u"汉")
for ln_idx, ln in enumerate(a):
ln = ln.rstrip()
if ln_idx == 0 and cfg["first-line-as-title"]:
f = title_font
line_height = cfg["title-line-height"]
else:
f = font
line_height = cfg["line-height"]
current_width = width - cur_x - cfg["padding"][1]
lines = makeLongLineToLines(ln, cur_x, cur_y, current_width, line_height, f, cn_char_width=cn_char_width)
cur_y += line_height * len(lines)
data["lines"].extend(lines)
data["height"] = cur_y + cfg["padding"][2]
return data
def makeImage(data, cfg):
u"""
"""
width, height = data["width"], data["height"]
if cfg["copyright"]:
height += 48
im = Image.new("RGB", (width, height), cfg["background-color"])
dr = ImageDraw.Draw(im)
for ln_idx, line in enumerate(data["lines"]):
__makeLine(im, line, cfg)
# dr.text((line["x"], line["y"]), line["text"], font=font, fill=cfg["font-color"])
    # optional downscaling (disabled)
# im = im.resize((width / 2, height / 2), Image.ANTIALIAS)
drawBorder(im, dr, cfg)
drawCopyright(im, dr, cfg)
return im
def drawCopyright(im, dr, cfg):
u"""绘制版权信息"""
if not cfg["copyright"]:
return
font = getFontForPyGame(font_name=cfg["font-family"], font_size=12)
rtext = font.render(cfg["copyright"],
cfg["font-antialiasing"], (128, 128, 128), cfg["background-color"]
)
sio = StringIO.StringIO()
pygame.image.save(rtext, sio)
sio.seek(0)
copyright_im = Image.open(sio)
iw, ih = im.size
cw, ch = rtext.get_size()
padding = cfg["padding"]
offset_y = ih - 32 - padding[2]
if cfg["copyright-center"]:
cx = (iw - cw) / 2
else:
cx = cfg["padding"][3]
cy = offset_y + 12
dr.line([(padding[3], offset_y), (iw - padding[1], offset_y)], width=1, fill=(192, 192, 192))
im.paste(copyright_im, (cx, cy))
def drawBorder(im, dr, cfg):
u"""绘制边框"""
if not cfg["border-size"]:
return
w, h = im.size
x, y = w - 1, h - 1
dr.line(
[(0, 0), (x, 0), (x, y), (0, y), (0, 0)],
width=cfg["border-size"],
fill=cfg["border-color"],
)
def __makeLine(im, line, cfg):
if not line:
return
sio = StringIO.StringIO()
x, y = line["x"], line["y"]
text = line["text"]
font = line["font"]
rtext = font.render(text, cfg["font-antialiasing"], cfg["font-color"], cfg["background-color"])
pygame.image.save(rtext, sio)
sio.seek(0)
ln_im = Image.open(sio)
im.paste(ln_im, (x, y))
def txt2im(txt, outfn, cfg=None, show=False):
# print(cfg)
cfg = makeConfig(cfg)
# print(cfg)
font = getFontForPyGame(cfg["font-family"], cfg["font-size"])
title_font = getFontForPyGame(cfg["font-family"], cfg["title-font-size"])
data = makeMatrix(txt, font, title_font, cfg)
im = makeImage(data, cfg)
im.save(outfn)
if os.name == "nt" and show:
im.show()
def test():
c = open("test.txt", "rb").read().decode("utf-8")
txt2im(c, "test.png", show=True)
if __name__ == "__main__":
test()
| 24.259146 | 113 | 0.544803 | [
"Apache-2.0"
] | bopopescu/dockerizeme | hard-gists/9c4d012d6fff059ccea7/snippet.py | 8,249 | Python |
###
# (C) Copyright [2019-2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from simplivity.ovc_client import OVC
from simplivity.exceptions import HPESimpliVityException
import pprint
pp = pprint.PrettyPrinter(indent=4)
config = {
"ip": "<ovc_ip>",
"credentials": {
"username": "<username>",
"password": "<password>"
}
}
ovc = OVC(config)
policies = ovc.policies
hosts = ovc.hosts
clusters = ovc.omnistack_clusters
cluster_groups = ovc.cluster_groups
print("\n\nget_all with default params")
all_policies = policies.get_all()
count = len(all_policies)
for policy in all_policies:
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")
policy_object = all_policies[0]
print("\n\nget_all with filters")
all_policies = policies.get_all(filters={'name': policy_object.data["name"]})
count = len(all_policies)
for policy in all_policies:
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")
print("\n\nget_all with pagination")
pagination = policies.get_all(limit=105, pagination=True, page_size=50)
end = False
while not end:
data = pagination.data
print("Page size:", len(data["resources"]))
print(f"{pp.pformat(data)}")
try:
pagination.next_page()
except HPESimpliVityException:
end = True
print("\n\nget_by_id")
policy = policies.get_by_id(policy_object.data["id"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nget_by_name")
policy = policies.get_by_name(policy_object.data["name"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nget_all VMs using this policy")
vms = policy.get_vms()
print(policy.data)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"{pp.pformat(vms)} \n")
print("\n\ncreate policy")
policy_name = "fixed_frequency_retention_policy"
policy = policies.create(policy_name)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
multiple_rules = [
{
"start_time": "14:30",
"end_time": "15:30",
"application_consistent": False,
"frequency": 3,
"retention": 5
},
{
"frequency": 5,
"retention": 6
}
]
print("\n\nadd rules to policy")
policy.create_rules(multiple_rules)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
single_rule = {
"frequency": 10,
"retention": 12
}
policy.create_rules(single_rule)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nget rule")
all_rules = policy.data["rules"]
for rule in all_rules:
rule_obj = policy.get_rule(rule.get('id'))
print(f"{pp.pformat(rule_obj)} \n")
print("\n\ndelete rule")
rule_id = policy.data["rules"][0]['id']
policy.delete_rule(rule_id)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nsuspend policy on host")
host = hosts.get_all()[0]
policies.suspend(host)
print("\n\nsuspend policy on omnistack_cluster")
cluster = clusters.get_all()[0]
policies.suspend(cluster)
""" cluster_group options works only with setup having MVA, please use below code for setup with MVA
cluster_group = cluster_groups.get_all()[0]
print(f"{cluster_group}")
print(f"{pp.pformat(cluster_group.data)} \n")
policies.suspend(cluster_group)
"""
""" federation options works only with setup NOT having MVA, please use below code for setup without MVA
print("\n\nsuspend policy on federation")
policies.suspend()
"""
print("\n\nrename policy")
policy.rename(f"renamed_{policy.data['name']}")
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\ndelete policy")
policy.delete()
| 26.974194 | 104 | 0.702464 | [
"Apache-2.0"
] | simplivity/simplivity-python | examples/policies.py | 4,181 | Python |
# Copyright 2014-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tornado compatibility layer for Motor, an asynchronous MongoDB driver.
See "Frameworks" in the Developer Guide.
"""
import functools
import os
import tornado.process
import warnings
from concurrent.futures import ThreadPoolExecutor
from tornado import concurrent, gen, ioloop, version as tornado_version
from tornado.gen import chain_future, coroutine # For framework interface.
from .. import DummySession as Session
try:
import contextvars
except ImportError:
contextvars = None
CLASS_PREFIX = ''
def get_event_loop():
return ioloop.IOLoop.current()
def is_event_loop(loop):
return isinstance(loop, ioloop.IOLoop)
def check_event_loop(loop):
if not is_event_loop(loop):
raise TypeError(
"io_loop must be instance of IOLoop, not %r" % loop)
def get_future(loop):
return concurrent.Future()
if 'MOTOR_MAX_WORKERS' in os.environ:
max_workers = int(os.environ['MOTOR_MAX_WORKERS'])
else:
max_workers = tornado.process.cpu_count() * 5
_EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
def run_on_executor(loop, fn, *args, **kwargs):
if contextvars:
context = contextvars.copy_context()
fn = functools.partial(context.run, fn)
return loop.run_in_executor(
_EXECUTOR, functools.partial(fn, *args, **kwargs))
def chain_return_value(future, loop, return_value):
"""Compatible way to return a value in all Pythons.
PEP 479, raise StopIteration(value) from a coroutine won't work forever,
but "return value" doesn't work in Python 2. Instead, Motor methods that
return values resolve a Future with it, and are implemented with callbacks
rather than a coroutine internally.
"""
chained = concurrent.Future()
def copy(_future):
# Return early if the task was cancelled.
if chained.done():
return
if _future.exception() is not None:
chained.set_exception(_future.exception())
else:
chained.set_result(return_value)
future.add_done_callback(functools.partial(loop.add_callback, copy))
return chained
def is_future(f):
return isinstance(f, concurrent.Future)
def call_soon(loop, callback, *args, **kwargs):
if args or kwargs:
loop.add_callback(functools.partial(callback, *args, **kwargs))
else:
loop.add_callback(callback)
def add_future(loop, future, callback, *args):
loop.add_future(future, functools.partial(callback, *args))
def pymongo_class_wrapper(f, pymongo_class):
"""Executes the coroutine f and wraps its result in a Motor class.
See WrapAsync.
"""
@functools.wraps(f)
async def _wrapper(self, *args, **kwargs):
result = await f(self, *args, **kwargs)
# Don't call isinstance(), not checking subclasses.
if result.__class__ == pymongo_class:
# Delegate to the current object to wrap the result.
return self.wrap(result)
else:
return result
return _wrapper
def yieldable(future):
warnings.warn(
"The yieldable function is deprecated and will be removed in "
"Motor 3.0", DeprecationWarning, stacklevel=2)
return future
def platform_info():
return 'Tornado %s' % (tornado_version,)
| 27.140845 | 78 | 0.704203 | [
"Apache-2.0"
] | smurfix/motor | motor/frameworks/tornado/__init__.py | 3,854 | Python |
# -*- coding: utf-8 -*-
"""Collection of useful http error for the Api"""
class JsonApiException(Exception):
"""Base exception class for unknown errors"""
title = "Unknown error"
status = "500"
source = None
def __init__(
self,
detail,
source=None,
title=None,
status=None,
code=None,
id_=None,
links=None,
meta=None,
):
"""Initialize a jsonapi exception
:param dict source: the source of the error
:param str detail: the detail of the error
"""
self.detail = detail
self.source = source
self.code = code
self.id = id_
self.links = links or {}
self.meta = meta or {}
if title is not None:
self.title = title
if status is not None:
self.status = status
def to_dict(self):
"""Return values of each fields of an jsonapi error"""
error_dict = {}
for field in (
"status",
"source",
"title",
"detail",
"id",
"code",
"links",
"meta",
):
if getattr(self, field, None):
error_dict.update({field: getattr(self, field)})
return error_dict
class BadRequest(JsonApiException):
"""BadRequest error"""
title = "Bad request"
status = "400"
class InvalidField(BadRequest):
"""Error to warn that a field specified in fields querystring is not in the requested resource schema"""
title = "Invalid fields querystring parameter."
source = {"parameter": "fields"}
class InvalidInclude(BadRequest):
"""Error to warn that a field specified in include querystring parameter is not a relationship of the requested
resource schema
"""
title = "Invalid include querystring parameter."
source = {"parameter": "include"}
class InvalidFilters(BadRequest):
"""Error to warn that a specified filters in querystring parameter contains errors"""
title = "Invalid filters querystring parameter."
source = {"parameter": "filters"}
class InvalidSort(BadRequest):
"""Error to warn that a field specified in sort querystring parameter is not in the requested resource schema"""
title = "Invalid sort querystring parameter."
source = {"parameter": "sort"}
class ObjectNotFound(JsonApiException):
"""Error to warn that an object is not found in a database"""
title = "Object not found"
status = "404"
class RelatedObjectNotFound(ObjectNotFound):
"""Error to warn that a related object is not found"""
title = "Related object not found"
class RelationNotFound(JsonApiException):
"""Error to warn that a relationship is not found on a model"""
title = "Relation not found"
class InvalidType(JsonApiException):
"""Error to warn that there is a conflit between resource types"""
title = "Invalid type"
status = "409"
class AccessDenied(JsonApiException):
"""Throw this error when requested resource owner doesn't match the user of the ticket"""
title = "Access denied"
status = "403"
class InvalidContentType(JsonApiException):
"""When the request uses a content type the API doesn't understand"""
title = "Bad request"
status = "415"
class InvalidAcceptType(JsonApiException):
"""When the request expects a content type that the API doesn't support"""
title = "Bad request"
status = "406"
| 24.93617 | 116 | 0.625711 | [
"MIT"
] | Leechael/flapison | flapison/exceptions.py | 3,516 | Python |
import argparse
import logging
import json
import os
import tempfile
import sys
import re
import flywheel
from .supporting_files import bidsify_flywheel, utils, templates
from .supporting_files.project_tree import get_project_tree
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('curate-bids')
# NOTE: assumed definition -- PROJECT_TEMPLATE_FILE_NAME_REGEX is referenced in
# curate_bids_tree() but is not defined in this snippet; the pattern below
# (matching a project-template JSON file name) is an illustrative guess.
PROJECT_TEMPLATE_FILE_NAME_REGEX = re.compile(r'.*project-template\.json$')
def clear_meta_info(context, template):
if 'info' in context and template.namespace in context['info']:
del context['info'][template.namespace]
def format_validation_error(err):
path = '/'.join(err.path)
if path:
return path + ' ' + err.message
return err.message
def validate_meta_info(container, template):
""" Validate meta information
Adds 'BIDS.NA' if no BIDS info present
Adds 'BIDS.valid' and 'BIDS.error_message'
to communicate to user if values are valid
Currently, validation is only checking if
mandatory properties are non-empty strings
Could add the following checks:
Are the values alpha numeric?
"""
# Get namespace
namespace = template.namespace
# If 'info' is NOT in container, then must not
# have matched to a template, create 'info'
# field with object {'BIDS': 'NA'}
if 'info' not in container:
container['info'] = {namespace: 'NA'}
# if the namespace ('BIDS') is NOT in 'info',
# then must not have matched to a template,
# add {'BIDS': 'NA'} to the meta info
elif namespace not in container['info']:
container['info'][namespace] = 'NA'
    # If already assigned BIDS 'NA', then skip (nothing further to validate)
elif container['info'][namespace] == 'NA':
pass
# Otherwise, iterate over keys within container
else:
valid = True
error_message = ''
# Find template
templateName = container['info'][namespace].get('template')
if templateName:
templateDef = template.definitions.get(templateName)
if templateDef:
errors = template.validate(templateDef, container['info'][namespace])
if errors:
valid = False
error_message = '\n'.join([format_validation_error(err) for err in errors])
else:
valid = False
error_message += 'Unknown template: %s. ' % templateName
# Assign 'valid' and 'error_message' values
container['info'][namespace]['valid'] = valid
container['info'][namespace]['error_message'] = error_message
def update_meta_info(fw, context):
""" Update file information
"""
# Modify file
if context['container_type'] == 'file':
# Modify acquisition file
if context['parent_container_type'] == 'acquisition':
fw.set_acquisition_file_info(
context['acquisition']['id'],
context['file']['name'],
context['file']['info']
)
# Modify project file
elif context['parent_container_type'] == 'project':
fw.set_project_file_info(
context['project']['id'],
context['file']['name'],
context['file']['info']
)
# Modify session file
elif context['parent_container_type'] == 'session':
fw.set_session_file_info(
context['session']['id'],
context['file']['name'],
context['file']['info']
)
else:
logger.info('Cannot determine file parent container type: ' + context['parent_container_type'])
# Modify project
elif context['container_type'] == 'project':
fw.replace_project_info(context['project']['id'], context['project']['info'])
# Modify session
elif context['container_type'] == 'session':
fw.replace_session_info(context['session']['id'], context['session']['info'])
# Modify acquisition
elif context['container_type'] == 'acquisition':
fw.replace_acquisition_info(context['acquisition']['id'], context['acquisition']['info'])
# Cannot determine container type
else:
logger.info('Cannot determine container type: ' + context['container_type'])
def curate_bids_dir(fw, project_id, session_id=None, reset=False, template_file=None, session_only=False):
"""
fw: Flywheel client
project_id: project id of project to curate
session_id: The optional session id to curate
reset: Whether or not to reset bids info before curation
template_file: The template file to use
session_only: If true, then only curate the provided session
"""
project = get_project_tree(fw, project_id, session_id=session_id, session_only=session_only)
curate_bids_tree(fw, project, reset, template_file, True)
def curate_bids_tree(fw, project, reset=False, template_file=None, update=True):
# Get project
project_files = project.get('files', [])
# Get template (for now, just use default)
template = templates.DEFAULT_TEMPLATE
# Check for project file
if not template_file:
template_filename = utils.find_custom_template(project_files)
if template_filename:
fd, path = tempfile.mkstemp('.json')
os.close(fd)
logger.info('Using project template: {0}'.format(template_filename))
fw.download_file_from_project(project['id'], template_filename, path)
template_file = path
if template_file:
template = templates.loadTemplate(template_file)
##
# Curation is now a 3-pass process
# 1. Do initial template matching and updating
# 2. Perform any path resolutions
# 3. Send updates to server
##
# 1. Do initial template matching and updating
for context in project.context_iter():
ctype = context['container_type']
parent_ctype = context['parent_container_type']
if reset:
clear_meta_info(context[ctype], template)
elif context[ctype].get('info',{}).get('BIDS') == 'NA':
continue
if ctype == 'project':
bidsify_flywheel.process_matching_templates(context, template)
# Validate meta information
# TODO: Improve the validator to understand what is valid for dataset_description file...
# validate_meta_info(context['project'])
elif ctype == 'session':
bidsify_flywheel.process_matching_templates(context, template)
# Add run_counter
context['run_counters'] = utils.RunCounterMap()
elif ctype == 'acquisition':
bidsify_flywheel.process_matching_templates(context, template)
elif ctype == 'file':
if parent_ctype == 'project' and PROJECT_TEMPLATE_FILE_NAME_REGEX.search(context['file']['name']):
# Don't BIDSIFY project template
continue
# Process matching
context['file'] = bidsify_flywheel.process_matching_templates(context, template)
# Validate meta information
validate_meta_info(context['file'], template)
# 2. Perform any path resolutions
session = None
for context in project.context_iter():
# Resolution
bidsify_flywheel.process_resolvers(context, template)
# 3. Send updates to server
if update:
for context in project.context_iter():
ctype = context['container_type']
node = context[ctype]
if node.is_dirty():
update_meta_info(fw, context)
def main_with_args(api_key, session_id, reset, session_only):
### Prep
# Check API key - raises Error if key is invalid
fw = flywheel.Flywheel(api_key)
if session_id:
project_id = utils.get_project_id_from_session_id(fw, session_id)
else:
print('Session id is required!')
sys.exit(1)
### Curate BIDS project
curate_bids_dir(fw, project_id, session_id, reset=reset, session_only=session_only)
def main():
### Read in arguments
parser = argparse.ArgumentParser(description='BIDS Curation')
parser.add_argument('--api-key', dest='api_key', action='store',
required=True, help='API key')
parser.add_argument('-p', dest='project_label', action='store',
required=False, default=None, help='Project Label on Flywheel instance')
parser.add_argument('--session', dest='session_id', action='store',
required=False, default=None, help='Session ID, used to look up project if project label is not readily available')
parser.add_argument('--reset', dest='reset', action='store_true',
default=False, help='Reset BIDS data before running')
parser.add_argument('--session-only', dest='session_only', action='store_true',
default=False, help='Only curate the session identified by --session')
parser.add_argument('--template-file', dest='template_file', action='store',
default=None, help='Template file to use')
args = parser.parse_args()
### Prep
# Check API key - raises Error if key is invalid
fw = flywheel.Flywheel(args.api_key)
# Get project id from label
if args.project_label:
project_id = utils.validate_project_label(fw, args.project_label)
elif args.session_id:
project_id = utils.get_project_id_from_session_id(fw, args.session_id)
else:
print('Either project label or session id is required!')
sys.exit(1)
### Curate BIDS project
curate_bids_dir(fw, project_id, args.session_id, reset=args.reset, template_file=args.template_file, session_only=args.session_only)
if __name__ == '__main__':
main()
| 36.82197 | 136 | 0.640778 | [
"MIT"
] | AndysWorth/bids-client | flywheel_bids/curate_bids.py | 9,721 | Python |
class PlayerResourceHand:
def __init__(self):
self.brick = 0
self.grain = 0
self.lumber = 0
self.ore = 0
self.wool = 0
self.totalResources = 0
def update(self):
self.totalResources = self.brick + self.grain + self.lumber + self.ore + self.wool
class PlayerDevelopmentHand:
def __init__(self):
self.knights = 0
self.roadBuildings = 0
self.yearOfPlenty = 0
self.monopolies = 0
self.victoryPoints = 0
self.totalDevelopments = 0
def update(self):
self.totalDevelopments = self.knights + self.roadBuildings + self.yearOfPlenty + self.monopolies \
+ self.victoryPoints
class EnemyPlayer:
def __init__(self, turnOrder, name, color, nR, nS, nC, lR, lA, hS, dS, vVP):
self.turnOrder = turnOrder
self.name = name
self.color = color
self.handSize = hS
self.developmentSize = dS
self.visibleVictoryPoints = vVP
self.numRoads = nR
self.numSettlements = nS
self.numCities = nC
self.longestRoad = lR
self.largestArmy = lA
class Player:
def __init__(self, name, color, turnOrder):
self.color = color
self.name = name
self.turnOrder = turnOrder
self.numRoads = 15
self.numSettlements = 5
self.numCities = 4
self.longestRoad = 0
self.largestArmy = 0
self.victoryPoints = 0
self.resourceHand = PlayerResourceHand()
self.developmentHand = PlayerDevelopmentHand()
self.ownedRoads = list()
self.ownedNodes = list()
def getNumResources(self):
return self.resourceHand.totalResources
def getNumDevelopment(self):
return self.developmentHand.totalDevelopments
def getSendToEnemies(self):
# toSend = EnemyPlayer(self.turnOrder, self.name, self.color,
# self.numRoads, self.numSettlements, self.numCities,
# self.longestRoad, self.largestArmy)
        # str.join() requires strings, so cast each field explicitly
        toSend = ','.join(str(v) for v in [self.turnOrder, self.name, self.color, self.numRoads,
                                           self.numSettlements, self.numCities,
                                           self.longestRoad, self.largestArmy])
return toSend
def acquireRoad(self, road):
self.ownedRoads.append(road)
def acquireNode(self, node):
self.ownedNodes.append(node)
def addResources(self, array):
self.resourceHand.brick += array[0]
self.resourceHand.grain += array[1]
self.resourceHand.lumber += array[2]
self.resourceHand.ore += array[3]
self.resourceHand.wool += array[4]
self.resourceHand.totalResources += array[0] + array[1] + array[2] + array[3] + array[4]
| 32.337209 | 117 | 0.607695 | [
"MIT"
] | ForgedSnow/Frontiersman | src/Player.py | 2,781 | Python |
import httpx
from anilist.types import Anime
from pyrogram import filters
from pyrogram.types import CallbackQuery
from pyromod.helpers import ikb
from pyromod.nav import Pagination
from amime.amime import Amime
@Amime.on_callback_query(filters.regex(r"^tvshort_trending7 anime (?P<page>\d+)"))
async def anime_suggestions(bot: Amime, callback: CallbackQuery):
page = int(callback.matches[0]["page"])
message = callback.message
lang = callback._lang
keyboard = []
async with httpx.AsyncClient(http2=True) as client:
response = await client.post(
url="https://graphql.anilist.co",
json=dict(
query="""
query($per_page: Int) {
Page(page: 8, perPage: $per_page) {
media(type: ANIME, format: TV_SHORT, sort: TRENDING_DESC, status: FINISHED) {
id
title {
romaji
english
native
}
siteUrl
}
}
}
""",
variables=dict(
                    per_page=100,
),
),
headers={
"Content-Type": "application/json",
"Accept": "application/json",
},
)
data = response.json()
await client.aclose()
if data["data"]:
items = data["data"]["Page"]["media"]
suggestions = [
Anime(id=item["id"], title=item["title"], url=item["siteUrl"])
for item in items
]
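        # Pagination (from pyromod.nav) lays the Anime objects out as inline-keyboard rows;
        # the item_data/page_data callbacks build the callback-data strings used for navigation.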
layout = Pagination(
suggestions,
item_data=lambda i, pg: f"menu {i.id}",
item_title=lambda i, pg: i.title.romaji,
page_data=lambda pg: f"tvshort_trending7 anime {pg}",
)
lines = layout.create(page, lines=8)
if len(lines) > 0:
keyboard += lines
keyboard.append([(lang.Prev, "tvshort_trending6 anime 1"), (lang.Next, "tvshort_trending8 anime 1")])
keyboard.append([(lang.back_button, "tvshort_menu")])
await message.edit_text(
lang.suggestions_text,
reply_markup=ikb(keyboard),
)
| 32.27027 | 105 | 0.490787 | ["MIT"] | Myudi422/ccgnime_req | amime/modules/anime/TV-SHORT/tvshort_trend/TVSHORT_TREND/tvshort_trend7.py | 2388 | Python |
from typing import Optional
import pandas as pd
import pytest
from evidently.analyzers.regression_performance_analyzer import RegressionPerformanceAnalyzer
from evidently.model.widget import BaseWidgetInfo
from evidently.options import OptionsProvider
from evidently.pipeline.column_mapping import ColumnMapping
from evidently.dashboard.widgets.reg_error_normality_widget import RegErrorNormalityWidget
@pytest.fixture
def widget() -> RegErrorNormalityWidget:
options_provider = OptionsProvider()
widget = RegErrorNormalityWidget("test_widget")
widget.options_provider = options_provider
return widget
def test_reg_error_normality_widget_analyzer_list(widget: RegErrorNormalityWidget) -> None:
assert widget.analyzers() == [RegressionPerformanceAnalyzer]
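# Two parametrized cases below: reference data only, and reference+current with
# dataset="reference"; both are expected to produce a "big_graph" widget.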
@pytest.mark.parametrize(
"reference_data, current_data, data_mapping, dataset, expected_result",
(
(
pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
None,
ColumnMapping(),
None,
BaseWidgetInfo(type="big_graph", title="test_widget", size=1),
),
(
pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
ColumnMapping(),
"reference",
BaseWidgetInfo(type="big_graph", title="test_widget", size=1),
),
),
)
def test_reg_error_normality_widget_simple_case(
widget: RegErrorNormalityWidget,
reference_data: pd.DataFrame,
current_data: pd.DataFrame,
data_mapping: ColumnMapping,
dataset: Optional[str],
expected_result: BaseWidgetInfo,
) -> None:
if dataset is not None:
widget.dataset = dataset
analyzer = RegressionPerformanceAnalyzer()
analyzer.options_provider = widget.options_provider
analyzer_results = analyzer.calculate(reference_data, current_data, data_mapping)
result = widget.calculate(
reference_data, current_data, data_mapping, {RegressionPerformanceAnalyzer: analyzer_results}
)
if expected_result is not None:
# we have some widget for visualization
assert result.type == expected_result.type
assert result.title == expected_result.title
assert result.size == expected_result.size
assert result.params is not None
else:
# no widget data, show nothing
assert result is None
| 33.310811 | 101 | 0.696552 | ["Apache-2.0"] | Tapot/evidently | tests/dashboard/widgets/test_reg_error_normality_widget.py | 2465 | Python |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/faction_perk/hq/shared_hq_s05.iff"
result.attribute_template_id = 2
result.stfName("deed","hq_s05")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| 25.941176 | 75 | 0.721088 | ["MIT"] | SWGANHServices/GameServer_Legacy | data/scripts/templates/object/tangible/deed/faction_perk/hq/shared_hq_s05.py | 441 | Python |
# -*- coding: utf-8 -*-
import unittest.mock
import pytest
import pycamunda.incident
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_get_params(engine_url):
get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
assert get_incident.url == engine_url + '/incident/anId'
assert get_incident.query_parameters() == {}
assert get_incident.body_parameters() == {}
@unittest.mock.patch('pycamunda.incident.Incident.load', unittest.mock.MagicMock())
@unittest.mock.patch('requests.Session.request')
def test_get_calls_requests(mock, engine_url):
get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
get_incident()
assert mock.called
assert mock.call_args[1]['method'].upper() == 'GET'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_get_raises_pycamunda_exception(engine_url):
get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
with pytest.raises(pycamunda.PyCamundaException):
get_incident()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.incident.Incident', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_get_raises_for_status(mock, engine_url):
get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
get_incident()
assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.base.from_isoformat', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.incident.IncidentType', unittest.mock.MagicMock())
def test_get_returns_incident(engine_url):
get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
incident = get_incident()
assert isinstance(incident, pycamunda.incident.Incident)
| 34.796296 | 83 | 0.77009 | ["MIT"] | asyncee/pycamunda | tests/incident/test_get.py | 1879 | Python |
import uuid
from datetime import datetime, timedelta
import pytest
import simplejson as json
from django.db.models import Q
from mock import Mock, patch
from treeherder.config.settings import IS_WINDOWS
from treeherder.perf.auto_perf_sheriffing.secretary_tool import SecretaryTool
from treeherder.model.models import Push, Job
from treeherder.perf.models import BackfillRecord, BackfillReport, PerformanceSettings
from treeherder.perf.auto_perf_sheriffing.outcome_checker import OutcomeChecker, OutcomeStatus
# we're testing against this (automatically provided by fixtures)
JOB_TYPE_ID = 1
def get_middle_index(successful_jobs):
# get middle index to make sure the push is in range
index_in_range = int((len(successful_jobs) + 1) / 2)
return index_in_range
@pytest.fixture
def record_backfilled(test_perf_alert, record_context_sample):
report = BackfillReport.objects.create(summary=test_perf_alert.summary)
record = BackfillRecord.objects.create(
alert=test_perf_alert,
report=report,
status=BackfillRecord.BACKFILLED,
)
record.set_context(record_context_sample)
record.save()
return record
@pytest.fixture
def range_dates(record_context_sample):
from_date = datetime.fromisoformat(record_context_sample[0]['push_timestamp'])
to_date = datetime.fromisoformat(record_context_sample[-1]['push_timestamp'])
return {
'before_date': from_date - timedelta(days=5),
'from_date': from_date,
'in_range_date': from_date + timedelta(hours=13),
'to_date': to_date,
'after_date': to_date + timedelta(days=3),
}
@pytest.fixture
def outcome_checking_pushes(
create_push, range_dates, record_context_sample, test_repository, test_repository_2
):
from_push_id = record_context_sample[0]['push_id']
to_push_id = record_context_sample[-1]['push_id']
pushes = [
create_push(test_repository, revision=uuid.uuid4(), time=range_dates['before_date']),
create_push(
test_repository,
revision=uuid.uuid4(),
time=range_dates['from_date'],
explicit_id=from_push_id,
),
create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
create_push(
test_repository,
revision=uuid.uuid4(),
time=range_dates['to_date'],
explicit_id=to_push_id,
),
create_push(test_repository, revision=uuid.uuid4(), time=range_dates['after_date']),
]
return pushes
@pytest.fixture
def successful_jobs(outcome_checking_pushes, eleven_jobs_stored):
jobs = Job.objects.all()
_successful_jobs = []
pairs = zip(outcome_checking_pushes, jobs)
for push, job in pairs:
job.push = push
job.result = 'success'
job.job_type_id = JOB_TYPE_ID
job.save()
_successful_jobs.append(job)
return _successful_jobs
@pytest.fixture
def jobs_with_one_failed(successful_jobs):
index_in_range = get_middle_index(successful_jobs)
job_to_fail = successful_jobs[index_in_range]
job_to_fail.result = 'testfailed'
job_to_fail.save()
@pytest.fixture
def jobs_with_one_pending(successful_jobs):
index_in_range = get_middle_index(successful_jobs)
job_pending = successful_jobs[index_in_range]
job_pending.result = 'unknown'
job_pending.save()
@pytest.fixture
def get_outcome_checker_mock():
def get_outcome_checker_mock(outcome: OutcomeStatus):
return type('', (), {'check': lambda *params: outcome})
return get_outcome_checker_mock
@pytest.mark.skipif(IS_WINDOWS, reason="datetime logic does not work when OS not on GMT")
def test_secretary_tool_updates_only_matured_reports(
test_perf_alert, test_perf_alert_2, create_record
):
# create new report with records
create_record(test_perf_alert)
# create mature report with records
date_past = datetime.utcnow() - timedelta(hours=5)
with patch('django.utils.timezone.now', Mock(return_value=date_past)):
create_record(test_perf_alert_2)
assert BackfillRecord.objects.count() == 2
assert BackfillRecord.objects.filter(status=BackfillRecord.PRELIMINARY).count() == 2
SecretaryTool.mark_reports_for_backfill()
assert BackfillRecord.objects.filter(status=BackfillRecord.PRELIMINARY).count() == 1
def test_secretary_tool_uses_existing_settings(performance_settings):
assert PerformanceSettings.objects.count() == 1
last_reset_date_before = json.loads(performance_settings.settings)["last_reset_date"]
SecretaryTool.validate_settings()
assert PerformanceSettings.objects.count() == 1
settings_after = PerformanceSettings.objects.filter(name="perf_sheriff_bot").first()
assert json.loads(settings_after.settings)["last_reset_date"] == last_reset_date_before
def test_secretary_tool_resets_settings_if_expired(expired_performance_settings):
assert PerformanceSettings.objects.count() == 1
expired_last_reset_date = json.loads(expired_performance_settings.settings)["last_reset_date"]
SecretaryTool.validate_settings()
assert PerformanceSettings.objects.count() == 1
settings_after = PerformanceSettings.objects.filter(name="perf_sheriff_bot").first()
assert json.loads(settings_after.settings)["last_reset_date"] != expired_last_reset_date
def test_secretary_tool_creates_new_settings_if_none_exist(db):
assert PerformanceSettings.objects.count() == 0
SecretaryTool.validate_settings()
assert PerformanceSettings.objects.count() == 1
def test_check_outcome_after_success(get_outcome_checker_mock, record_backfilled):
outcome_checker_mock = get_outcome_checker_mock(OutcomeStatus.SUCCESSFUL)
secretary = SecretaryTool(outcome_checker_mock)
assert BackfillRecord.objects.filter(status=BackfillRecord.BACKFILLED).count() == 1
assert BackfillRecord.objects.filter(status=BackfillRecord.SUCCESSFUL).count() == 0
secretary.check_outcome()
assert BackfillRecord.objects.filter(status=BackfillRecord.BACKFILLED).count() == 0
assert BackfillRecord.objects.filter(status=BackfillRecord.SUCCESSFUL).count() == 1
def test_check_outcome_after_fail(get_outcome_checker_mock, record_backfilled):
outcome_checker_mock = get_outcome_checker_mock(OutcomeStatus.FAILED)
secretary = SecretaryTool(outcome_checker_mock)
assert BackfillRecord.objects.filter(status=BackfillRecord.BACKFILLED).count() == 1
assert BackfillRecord.objects.filter(status=BackfillRecord.FAILED).count() == 0
secretary.check_outcome()
assert BackfillRecord.objects.filter(status=BackfillRecord.BACKFILLED).count() == 0
assert BackfillRecord.objects.filter(status=BackfillRecord.FAILED).count() == 1
def test_no_action_when_in_progress(get_outcome_checker_mock, record_backfilled):
outcome_checker_mock = get_outcome_checker_mock(OutcomeStatus.IN_PROGRESS)
secretary = SecretaryTool(outcome_checker_mock)
assert BackfillRecord.objects.filter(status=BackfillRecord.BACKFILLED).count() == 1
secretary.check_outcome()
assert BackfillRecord.objects.filter(status=BackfillRecord.BACKFILLED).count() == 1
def test_outcome_checker_identifies_pushes_in_range(
record_backfilled, test_repository, test_repository_2, range_dates, outcome_checking_pushes
):
# TODO: retarget this test to BackfillRecord.get_pushes_in_range()
outcome_checker = OutcomeChecker()
total_pushes = Push.objects.count()
from_time = range_dates['from_date']
to_time = range_dates['to_date']
total_outside_pushes = Push.objects.filter(
Q(repository=test_repository) & (Q(time__lt=from_time) | Q(time__gt=to_time))
).count()
pushes_in_range = outcome_checker._get_pushes_in_range(from_time, to_time, test_repository.id)
assert len(pushes_in_range) == total_pushes - total_outside_pushes
# change repository for the first 2 pushes in range
assert test_repository.id != test_repository_2.id
total_changed_pushes = 2
for push in pushes_in_range[:total_changed_pushes]:
push.repository = test_repository_2
push.save()
total_other_repo_pushes = Push.objects.filter(repository=test_repository_2).count()
assert total_other_repo_pushes == total_changed_pushes
updated_pushes_in_range = outcome_checker._get_pushes_in_range(
from_time, to_time, test_repository.id
)
assert len(updated_pushes_in_range) == len(pushes_in_range) - total_other_repo_pushes
class TestOutcomeChecker:
@patch('treeherder.perf.auto_perf_sheriffing.outcome_checker.get_job_type')
def test_successful_jobs_mean_successful_outcome(
self, mock_get_job_type, record_backfilled, outcome_checking_pushes, successful_jobs
):
# TODO: remove job type mock after soft launch lands
mock_get_job_type.return_value = JOB_TYPE_ID
outcome_checker = OutcomeChecker()
response = outcome_checker.check(record_backfilled)
assert response == OutcomeStatus.SUCCESSFUL
@patch('treeherder.perf.auto_perf_sheriffing.outcome_checker.get_job_type')
def test_failed_job_means_failed_outcome(
self, mock_get_job_type, record_backfilled, outcome_checking_pushes, jobs_with_one_failed
):
mock_get_job_type.return_value = JOB_TYPE_ID
outcome_checker = OutcomeChecker()
response = outcome_checker.check(record_backfilled)
assert response == OutcomeStatus.FAILED
@patch('treeherder.perf.auto_perf_sheriffing.outcome_checker.get_job_type')
def test_pending_job_means_in_progress_outcome(
self, mock_get_job_type, record_backfilled, outcome_checking_pushes, jobs_with_one_pending
):
mock_get_job_type.return_value = JOB_TYPE_ID
outcome_checker = OutcomeChecker()
response = outcome_checker.check(record_backfilled)
assert response == OutcomeStatus.IN_PROGRESS
| 38.198502 | 98 | 0.761055 | ["MPL-2.0", "MPL-2.0-no-copyleft-exception"] | aerickson/treeherder | tests/perfalert/test_auto_perf_sheriffing/test_secretary_tool.py | 10199 | Python |
#!/usr/bin/env python3
# This script attempts to generate massive design-of-experiments (DoE) run scripts
# and save them into "runMassive.sh" and "doe.log".
#-------------------------------------------------------------------------------
import os, sys
import os.path
import re
import itertools
import glob
PUBLIC = ['nangate45', 'sky130hd', 'sky130hs', 'asap7']
# Number of config files generated per designs/{platform}/{design}/chunks/chunk{number} directory.
NumFilesPerChunk = 50000
## Original SDC file name
OriginalSDC = 'constraint_doe.sdc'
##################################
# define input parameters
##################################
# for generated .sh file name
ShellName = 'runMassive'
##################
# Design
##################
## Define platform-design. User should remove ',' for the last item in the list. (string)
PLATFORM_DESIGN = [ \
#'sky130hd-gcd' \
'sky130hd-ibex', \
#'sky130hd-aes', \
#'sky130hd-jpeg', \
#'sky130hs-gcd', \
#'sky130hs-ibex', \
#'sky130hs-aes', \
#'sky130hs-jpeg', \
#'nangate45-gcd', \
#'nangate45-ibex', \
#'nangate45-aes', \
#'nangate45-jpeg', \
#'asap7-gcd', \
#'asap7-ibex', \
#'asap7-aes', \
#'asap7-jpeg', \
]
## Target Clock Period (float)
CLK_PERIOD = []
## SDC uncertainty and IO delay.
## TODO: Currently, it only support when 'set uncertainty' and 'set io_delay'
## are defined in the constraint.sdc file.
UNCERTAINTY = []
IO_DELAY = []
##################
# Synthesis
##################
## Clock period for Yosys (for synthesis)
## The unit should follow each design (ns, ps) (float)
ABC_CLOCK_PERIOD = []
## Hierarchical Synthesis. 0 = hierarchical, 1 = flatten, empty = flatten (default) (int)
FLATTEN = []
##################
# Floorplan
##################
## Utilization. e.g, 45 -> 45% of core util. (int)
#CORE_UTIL = [20, 40, 55]
CORE_UTIL = [20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50]
## Aspect ratio. It REQUIRES 'CORE_UTIL' values (float)
ASPECT_RATIO = [0.5, 0.75, 1.0, 1.25, 1.5]
## Core-to-die gap distance (um). It REQUIRES 'CORE_UTIL' values (int)
CORE_DIE_MARGIN = [10]
## Pin Distance
#PINS_DISTANCE = [2]
PINS_DISTANCE = []
##################
# Placement
##################
## Global Placement Padding for std cells (int)
GP_PAD = [4]
## Detailed Placement Padding for std cells (int)
DP_PAD = [2]
## Global Placement target bin density (select only one option) (.2 float)
## option 1) PLACE_DENSITY uses the values in the list as it is.
## option 2) PLACE_DENSITY_LB_ADDON adds the values in the list to the lower boundary of the PLACE_DENSITY
## For eaxmple, PLACE_DENSITY_LB_ADDON = [0, 0.02, 0.04] means PLACE_DENSITY = [LB, LB+0.02, LB+0.04]
## LB of the place density == (total instance area + padding) / total die area
PLACE_DENSITY = []
PLACE_DENSITY_LB_ADDON = [0, 0.04, 0.08]
##################
# CTS
##################
## CTS clustering size and diameter (um) (int)
CTS_CLUSTER_SIZE = []
CTS_CLUSTER_DIAMETER = []
##################
# Global Routing
##################
## Set global routing layer capacity adjustment
## e.g.) 0.2 -> 20% usage for global routing
## Set for all layers.
## Each layer's layer adjustment will be overwritten with below per-layer values. (float)
LAYER_ADJUST = [0.5]
LAYER_ADJUST_M1 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M2 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M3 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M4 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M5 = []
LAYER_ADJUST_M6 = []
LAYER_ADJUST_M7 = []
LAYER_ADJUST_M8 = []
LAYER_ADJUST_M9 = []
## Set global routing random seed. (int)
GR_SEED = []
## Set allow global routing overflow. 0 = no, 1 = yes, empty = no (default) (int)
# TODO: currently it does not work. Leave this as 0.
GR_OVERFLOW = [0]
##################
# Detailed Routing
##################
## Set detailed routing random seed. (int)
DR_SEED = []
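# Each entry maps a knob name to the list of values to sweep; an empty list means
# the knob is left at the flow default (handled by assignEmptyAttrs below).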
SweepingAttributes = { "PLATFORM_DESIGN": PLATFORM_DESIGN,
"CP": CLK_PERIOD,
"ABC_CP": ABC_CLOCK_PERIOD,
"FLATTEN": FLATTEN,
"UNCERTAINTY": UNCERTAINTY,
"IO_DELAY": IO_DELAY,
"UTIL": CORE_UTIL,
"AR": ASPECT_RATIO,
"GAP": CORE_DIE_MARGIN,
"PINS_DISTANCE": PINS_DISTANCE,
"GP_PAD": GP_PAD,
"DP_PAD": DP_PAD,
"PD": PLACE_DENSITY,
"PD_LB_ADD": PLACE_DENSITY_LB_ADDON,
"CTS_CLUSTER_SIZE": CTS_CLUSTER_SIZE,
"CTS_CLUSTER_DIAMETER": CTS_CLUSTER_DIAMETER,
"LAYER_ADJUST": LAYER_ADJUST,
"M1": LAYER_ADJUST_M1,
"M2": LAYER_ADJUST_M2,
"M3": LAYER_ADJUST_M3,
"M4": LAYER_ADJUST_M4,
"M5": LAYER_ADJUST_M5,
"M6": LAYER_ADJUST_M6,
"M7": LAYER_ADJUST_M7,
"M8": LAYER_ADJUST_M8,
"M9": LAYER_ADJUST_M9,
"GR_SEED": GR_SEED,
"GR_OVERFLOW": GR_OVERFLOW,
"DR_SEED": DR_SEED }
def assignEmptyAttrs(dicts):
knobs = {}
for k, v in dicts.items():
if len(v) == 0:
knobs.setdefault(k, ['empty'])
else:
knobs.setdefault(k,v)
return knobs
def writeDoeLog(dicts, ProductDicts):
fo = open('./doe.log', 'w')
numRuns = 1
for k, v in dicts.items():
if len(v)>0:
print('%s has %s number of values'%(k,len(v)))
fo.write('%s has %s number of values\n'%(k,len(v)))
numRuns = numRuns * len(v)
fo.write('\nTotal Number of Runs = %s\n\n'%numRuns)
print('\nTotal Number of Runs = %s\n\n'%numRuns)
knobValuesList = []
knobNamesList = []
for CurAttrs in ProductAttrs:
knobValues = []
knobNames = []
for k, v in CurAttrs.items():
if v=='empty':
continue
else:
knobNames.append(str(k))
knobValues.append(str(v))
knobValuesList.append(knobValues)
knobNamesList.append(knobNames)
fo.write(str(knobNamesList[0])+'\n')
for knobSet in knobValuesList:
fo.write(str(knobSet)+'\n')
fo.close()
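# Cartesian product of all knob value lists, yielding one {knob: value} dict per DoE point.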
def productDict(dicts):
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
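# Rewrites the platform fastroute.tcl text: overrides the global layer adjustment,
# appends per-layer adjustment commands, and optionally allows global-route overflow.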
def adjustFastRoute(filedata, adjSet, GrOverflow):
if adjSet[0]!='empty':
filedata = re.sub("(set_global_routing_layer_adjustment .* )[0-9\.]+", "\g<1>{:.2f}".format(float(adjSet[0])), filedata)
sep_la_cmds = ""
for i, sep_la in enumerate(adjSet):
if i==0 or sep_la=='empty':
continue
## TODO: Currently, only supports for SKY130HD and SKY130HS.
## TODO: user should manually change the layer name to match techLEF.
layer_name = 'met%s'%i
sep_la_cmds += "set_global_routing_layer_adjustment " + layer_name + " {:.2f}\n".format(float(sep_la))
filedata = re.sub("set_global_routing_layer_adjustment.*\n", "\g<0>"+sep_la_cmds, filedata)
if int(GrOverflow) == 1:
filedata = re.sub("(global_route.*(\n\s+.*)*)", "\g<1> \\\n -allow_overflow", filedata)
return(filedata)
#def setPlaceDensity(DESIGN, Util, GpPad):
# if DESIGN == "ibex":
# LB = (Util/100) + (GpPad * (0.4*(Util/100)-0.01))+0.01
# elif DESIGN == "aes":
# LB = (Util/100) + (GpPad * (0.5*(Util/100)-0.005))+0.02
# else:
# LB = (Util/100) + (GpPad * (0.4*(Util/100)-0.01))+0.01
# return LB
def writeConfigs(CurAttrs, CurChunkNum):
CurPlatform, CurDesign = CurAttrs.get('PLATFORM_DESIGN').split('-')
CurClkPeriod = CurAttrs.get('CP')
CurAbcClkPeriod = CurAttrs.get('ABC_CP')
CurFlatten = CurAttrs.get('FLATTEN')
CurUncertainty = CurAttrs.get('UNCERTAINTY')
CurIoDelay = CurAttrs.get('IO_DELAY')
CurCoreUtil = CurAttrs.get('UTIL')
CurAspectRatio = CurAttrs.get('AR')
CurCoreDieMargin = CurAttrs.get('GAP')
CurPinsDistance = CurAttrs.get('PINS_DISTANCE')
CurGpPad = CurAttrs.get('GP_PAD')
CurDpPad = CurAttrs.get('DP_PAD')
CurPlaceDensity = CurAttrs.get('PD')
CurPlaceDensityLbAddon = CurAttrs.get('PD_LB_ADD')
CurCtsClusterSize = CurAttrs.get('CTS_CLUSTER_SIZE')
CurCtsClusterDiameter = CurAttrs.get('CTS_CLUSTER_DIAMETER')
CurLayerAdjust = CurAttrs.get('LAYER_ADJUST')
CurLayerAdjustM1 = CurAttrs.get('M1')
CurLayerAdjustM2 = CurAttrs.get('M2')
CurLayerAdjustM3 = CurAttrs.get('M3')
CurLayerAdjustM4 = CurAttrs.get('M4')
CurLayerAdjustM5 = CurAttrs.get('M5')
CurLayerAdjustM6 = CurAttrs.get('M6')
CurLayerAdjustM7 = CurAttrs.get('M7')
CurLayerAdjustM8 = CurAttrs.get('M8')
CurLayerAdjustM9 = CurAttrs.get('M9')
CurGrSeed = CurAttrs.get('GR_SEED')
CurGrOverflow = CurAttrs.get('GR_OVERFLOW')
CurDrSeed = CurAttrs.get('DR_SEED')
if not os.path.isdir('./designs/%s/%s/chunks'%(CurPlatform,CurDesign)):
os.mkdir('./designs/%s/%s/chunks'%(CurPlatform,CurDesign))
CurDesignDir = './designs/%s/%s'%(CurPlatform,CurDesign)
CurChunkDir = './designs/%s/%s/chunks/chunk%s'%(CurPlatform,CurDesign,CurChunkNum)
if not os.path.isdir(CurChunkDir):
os.mkdir(CurChunkDir)
#print(CurChunkNum)
if MakeArg=='clean':
fileList = glob.glob('%s/*-DoE-*'%(CurChunkDir))
if fileList is not None:
for file in fileList:
os.remove(file)
return
#print(CurPlatform, CurDesign)
#print(CurClkPeriod, CurAbcClkPeriod, CurFlatten, CurCoreUtil)
#print(CurAspectRatio, CurCoreDieMargin, CurGpPad, CurDpPad)
#print(CurCtsClusterSize, CurCtsClusterDiameter, CurLayerAdjust)
#print(CurLayerAdjustM1, CurLayerAdjustM2, CurLayerAdjustM3)
#print(CurLayerAdjustM4, CurLayerAdjustM5, CurLayerAdjustM6)
#print(CurLayerAdjustM7, CurLayerAdjustM8, CurLayerAdjustM9)
#print(CurGrOverflow)
#print(CurAttrs.items())
variantName = ''
for k, v in CurAttrs.items():
if v!='empty' and k!='PLATFORM_DESIGN':
variantName = variantName + '-' + str(k) + '_' + str(v)
variantName = variantName[1:]
#fileName = 'config-%s-%s-'%(CurPlatform, CurDesign)+variantName + '.mk'
fileName = 'config-DoE-'+variantName + '.mk'
fo = open('%s/%s'%(CurChunkDir,fileName), 'w')
fo.write('include $(realpath $(dir $(DESIGN_CONFIG))../../)/config.mk\n')
fo.write('\n')
fo.write('FLOW_VARIANT = %s\n'%(variantName))
fo.write('\n')
if CurClkPeriod != 'empty' or CurUncertainty != 'empty' or CurIoDelay != 'empty':
fOrigSdc = open('%s/%s'%(CurDesignDir,OriginalSDC),'r')
filedata = fOrigSdc.read()
fOrigSdc.close()
if CurClkPeriod != 'empty':
filedata = re.sub("-period [0-9\.]+", "-period " + str(CurClkPeriod), filedata)
#filedata = re.sub("-waveform [{}\s0-9\.]+$}", "\n", filedata)
filedata = re.sub("-waveform [{}\s0-9\.]+[\s|\n]", "", filedata)
if CurUncertainty != 'empty':
filedata = re.sub("set uncertainty [0-9\.]+", "set uncertainty " + str(CurUncertainty), filedata)
if CurIoDelay != 'empty':
filedata = re.sub("set io_delay [0-9\.]+", "set io_delay " + str(CurIoDelay), filedata)
#fOutSdc = open('./designs/%s/%s/constraint-%s-%s-'%(CurPlatform,CurDesign,CurPlatform,CurDesign)+variantName+'.sdc','w')
fOutSdc = open('%s/constraint-DoE-'%(CurChunkDir)+variantName+'.sdc','w')
fOutSdc.write(filedata)
fOutSdc.close()
fo.write('export SDC_FILE = $(dir $(DESIGN_CONFIG))/constraint-DoE-%s.sdc\n'%variantName)
if CurAbcClkPeriod != 'empty':
fo.write('export ABC_CLOCK_PERIOD_IN_PS = %s\n'%CurAbcClkPeriod)
if CurFlatten != 'empty':
if CurFlatten == 0:
fo.write('export SYNTH_ARGS = \n')
if CurCoreUtil != 'empty':
fo.write('export CORE_UTILIZATION = %s\n'%CurCoreUtil)
if CurPlaceDensity != 'empty':
fo.write('export PLACE_DENSITY = %.2f\n'%CurPlaceDensity)
if CurPlaceDensityLbAddon != 'empty':
fo.write('export PLACE_DENSITY_LB_ADDON = %.2f\n'%CurPlaceDensityLbAddon)
if CurAspectRatio != 'empty':
fo.write('export CORE_ASPECT_RATIO = %s\n'%CurAspectRatio)
if CurCoreDieMargin != 'empty':
fo.write('export CORE_MARGIN = %s\n'%CurCoreDieMargin)
if CurPinsDistance != 'empty':
fo.write('export PLACE_PINS_ARGS = -min_distance %s\n'%CurPinsDistance)
if CurGpPad != 'empty':
fo.write('export CELL_PAD_IN_SITES_GLOBAL_PLACEMENT = %s\n'%CurGpPad)
if CurDpPad != 'empty':
fo.write('export CELL_PAD_IN_SITES_DETAIL_PLACEMENT = %s\n'%CurDpPad)
if CurCtsClusterSize != 'empty':
fo.write('export CTS_CLUSTER_SIZE = %s\n'%CurCtsClusterSize)
if CurCtsClusterDiameter != 'empty':
fo.write('export CTS_CLUSTER_DIAMETER = %s\n'%CurCtsClusterDiameter)
if CurDrSeed != 'empty':
fo.write('export OR_K = 1.0\n')
fo.write('export OR_SEED = %s\n'%CurDrSeed)
if CurLayerAdjust != 'empty' or \
CurLayerAdjustM1 != 'empty' or \
CurLayerAdjustM2 != 'empty' or \
CurLayerAdjustM3 != 'empty' or \
CurLayerAdjustM4 != 'empty' or \
CurLayerAdjustM5 != 'empty' or \
CurLayerAdjustM6 != 'empty' or \
CurLayerAdjustM7 != 'empty' or \
CurLayerAdjustM8 != 'empty' or \
CurLayerAdjustM9 != 'empty' or \
CurGrSeed != 'empty':
fo.write('export FASTROUTE_TCL = $(dir $(DESIGN_CONFIG))/fastroute-DoE-%s.tcl'%variantName)
if CurPlatform in PUBLIC:
PLATFORM_DIR = './platforms/%s'%CurPlatform
else:
PLATFORM_DIR = '../../%s'%CurPlatform
fFrIn = open('%s/fastroute.tcl'%PLATFORM_DIR,'r')
filedata = fFrIn.read()
fFrIn.close()
CurLayerAdjustSet = [CurLayerAdjust, \
CurLayerAdjustM1, \
CurLayerAdjustM2, \
CurLayerAdjustM3, \
CurLayerAdjustM4, \
CurLayerAdjustM5, \
CurLayerAdjustM6, \
CurLayerAdjustM7, \
CurLayerAdjustM8, \
CurLayerAdjustM9 ]
filedata = adjustFastRoute(filedata, CurLayerAdjustSet, CurGrOverflow)
FrName = 'fastroute-DoE-'+variantName+'.tcl'
fOutFr = open('%s/%s'%(CurChunkDir,FrName),'w')
fOutFr.write(filedata)
if CurGrSeed != 'empty':
fOutFr.write('set_global_routing_random -seed %s'%CurGrSeed)
fOutFr.close()
fo.close()
frun = open('./%s.sh'%ShellName, 'a')
RunName = 'DESIGN_CONFIG=%s/%s make\n'%(CurChunkDir,fileName)
frun.write(RunName)
frun.close()
fcollect = open('./%s_metrics_collect.sh'%ShellName, 'a')
CollectName = 'python util/genMetrics.py -x -p %s -d %s -v %s -o metrics_%s/%s.json\n'%(CurPlatform, CurDesign, variantName, ShellName, variantName)
fcollect.write(CollectName)
fcollect.close()
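# The script expects one CLI argument: 'clean' removes previously generated DoE files,
# while any other value generates the config/constraint/fastroute files and run scripts.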
MakeArg = sys.argv[1]
if not os.path.isdir('./metrics_%s'%ShellName):
os.mkdir('./metrics_%s'%ShellName)
knobs = assignEmptyAttrs(SweepingAttributes)
ProductAttrs = list(productDict(knobs))
writeDoeLog(SweepingAttributes, ProductAttrs)
if os.path.isfile('./%s.sh'%ShellName):
os.remove('./%s.sh'%ShellName)
if os.path.isfile('./%s_metrics_collect.sh'%ShellName):
os.remove('./%s_metrics_collect.sh'%ShellName)
CurChunkNum = 0
for i, CurAttrs in enumerate(ProductAttrs, 1):
if i % NumFilesPerChunk == 0:
writeConfigs(CurAttrs, CurChunkNum)
CurChunkNum = CurChunkNum+1
else:
writeConfigs(CurAttrs, CurChunkNum)
# with open('file.txt') as data:
# line = data.readlines()
#
#for line in lines:
# with open('file.txt') as data:
# for line in file_data:
| 31.703863 | 150 | 0.648707 | ["BSD-3-Clause"] | ABKGroup/GenMassive | genMassive.py | 14774 | Python |
from integration.helpers.base_test import BaseTest
class TestBasicLayerVersion(BaseTest):
"""
Basic AWS::Serverless::StateMachine tests
"""
def test_basic_state_machine_inline_definition(self):
"""
Creates a State Machine from inline definition
"""
self.create_and_verify_stack("basic_state_machine_inline_definition")
def test_basic_state_machine_with_tags(self):
"""
Creates a State Machine with tags
"""
self.create_and_verify_stack("basic_state_machine_with_tags")
tags = self.get_stack_tags("MyStateMachineArn")
self.assertIsNotNone(tags)
self._verify_tag_presence(tags, "stateMachine:createdBy", "SAM")
self._verify_tag_presence(tags, "TagOne", "ValueOne")
self._verify_tag_presence(tags, "TagTwo", "ValueTwo")
def _verify_tag_presence(self, tags, key, value):
"""
Verifies the presence of a tag and its value
Parameters
----------
tags : List of dict
List of tag objects
key : string
Tag key
value : string
Tag value
"""
tag = next(tag for tag in tags if tag["key"] == key)
self.assertIsNotNone(tag)
self.assertEqual(tag["value"], value)
| 29.659091 | 77 | 0.629885 | ["Apache-2.0"] | faraz891/serverless-application-model | integration/single/test_basic_state_machine.py | 1305 | Python |
from __future__ import absolute_import
from .context import *
from .base_verbs import *
from .model import OpenShiftPythonException
from .model import Model, Missing
from .selector import *
from .apiobject import *
from . import naming
from . import status
from . import config
from .ansible import ansible
# Single source for module version
__VERSION__ = '1.0.12'
null = None # Allow scripts to specify null in object definitions
# Allows modules to trigger errors
def error(msg, **kwargs):
raise OpenShiftPythonException(msg, **kwargs)
# Convenience method for accessing the module version
def get_module_version():
return __VERSION__
| 23.321429 | 66 | 0.777948 | ["Apache-2.0"] | dmaizel/openshift-client-python | packages/openshift/__init__.py | 653 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test attention
"""
import unittest
import torch
from torch import tensor
from torch import nn
from function_GAT_attention import SpGraphAttentionLayer, ODEFuncAtt
from torch_geometric.utils import softmax, to_dense_adj
from data import get_dataset
class AttentionTests(unittest.TestCase):
def setUp(self):
self.edge = tensor([[0, 2, 2, 1], [1, 0, 1, 2]])
self.x = tensor([[1., 2.], [3., 2.], [4., 5.]], dtype=torch.float)
self.W = tensor([[2, 1], [3, 2]], dtype=torch.float)
self.alpha = tensor([[1, 2, 3, 4]], dtype=torch.float)
self.edge1 = tensor([[0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1]])
self.x1 = torch.ones((3, 2), dtype=torch.float)
self.leakyrelu = nn.LeakyReLU(0.2)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.opt = {'dataset': 'Cora', 'self_loop_weight': 1, 'leaky_relu_slope': 0.2, 'beta_dim': 'vc', 'heads': 2,
'K': 10,
'attention_norm_idx': 0, 'add_source': False, 'max_nfe': 1000, 'mix_features': False,
'attention_dim': 32,
'mixed_block': False, 'rewiring': None, 'no_alpha_sigmoid': False, 'reweight_attention': False,
'kinetic_energy': None, 'jacobian_norm2': None, 'total_deriv': None, 'directional_penalty': None}
def tearDown(self) -> None:
pass
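    # Reproduces the GAT attention computation by hand: linear transform, per-edge
    # concatenation of endpoint features, leaky-ReLU scoring, and softmax over target nodes.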
def test(self):
h = torch.mm(self.x, self.W)
edge_h = torch.cat((h[self.edge[0, :], :], h[self.edge[1, :], :]), dim=1)
self.assertTrue(edge_h.shape == torch.Size([self.edge.shape[1], 2 * 2]))
ah = self.alpha.mm(edge_h.t()).t()
self.assertTrue(ah.shape == torch.Size([self.edge.shape[1], 1]))
edge_e = self.leakyrelu(ah)
attention = softmax(edge_e, self.edge[1])
print(attention)
def test_function(self):
in_features = self.x.shape[1]
out_features = self.x.shape[1]
def get_round_sum(tens, n_digits=3):
val = torch.sum(tens, dim=int(not self.opt['attention_norm_idx']))
return (val * 10 ** n_digits).round() / (10 ** n_digits)
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(self.x, self.edge) # should be n_edges x n_heads
self.assertTrue(attention.shape == (self.edge.shape[1], self.opt['heads']))
dense_attention1 = to_dense_adj(self.edge, edge_attr=attention[:, 0]).squeeze()
dense_attention2 = to_dense_adj(self.edge, edge_attr=attention[:, 1]).squeeze()
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention1), 1.)))
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention2), 1.)))
self.assertTrue(torch.all(attention > 0.))
self.assertTrue(torch.all(attention <= 1.))
dataset = get_dataset(self.opt, '../data', False)
data = dataset.data
in_features = data.x.shape[1]
out_features = data.x.shape[1]
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(data.x, data.edge_index) # should be n_edges x n_heads
self.assertTrue(attention.shape == (data.edge_index.shape[1], self.opt['heads']))
dense_attention1 = to_dense_adj(data.edge_index, edge_attr=attention[:, 0]).squeeze()
dense_attention2 = to_dense_adj(data.edge_index, edge_attr=attention[:, 1]).squeeze()
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention1), 1.)))
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention2), 1.)))
self.assertTrue(torch.all(attention > 0.))
self.assertTrue(torch.all(attention <= 1.))
def test_symetric_attention(self):
in_features = self.x1.shape[1]
out_features = self.x1.shape[1]
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(self.x1, self.edge1) # should be n_edges x n_heads
self.assertTrue(torch.all(torch.eq(attention, 0.5 * torch.ones((self.edge1.shape[1], self.x1.shape[1])))))
def test_module(self):
dataset = get_dataset(self.opt, '../data', False)
t = 1
out_dim = 6
func = ODEFuncAtt(dataset.data.num_features, out_dim, self.opt, dataset.data, self.device)
out = func(t, dataset.data.x)
print(out.shape)
self.assertTrue(out.shape == (dataset.data.num_nodes, dataset.num_features))
| 44.326531 | 113 | 0.67058 | ["Apache-2.0"] | dungxibo123/graph-neural-pde | test/test_attention.py | 4344 | Python |
"""Platform for Husqvarna Automower device tracker integration."""
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN
async def async_setup_entry(hass, entry, async_add_devices) -> None:
"""Setup sensor platform."""
session = hass.data[DOMAIN][entry.entry_id]
async_add_devices(
AutomowerTracker(session, idx) for idx, ent in enumerate(session.data["data"])
)
class AutomowerTracker(TrackerEntity):
"""Defining the Device Tracker Entity."""
def __init__(self, session, idx) -> None:
self.session = session
self.idx = idx
self.mower = self.session.data["data"][self.idx]
mower_attributes = self.__get_mower_attributes()
self.mower_id = self.mower["id"]
self.mower_name = mower_attributes["system"]["name"]
self.model = mower_attributes["system"]["model"]
self.session.register_cb(
lambda _: self.async_write_ha_state(), schedule_immediately=True
)
def __get_mower_attributes(self) -> dict:
return self.session.data["data"][self.idx]["attributes"]
@property
def device_info(self) -> DeviceInfo:
return DeviceInfo(identifiers={(DOMAIN, self.mower_id)})
@property
def name(self) -> str:
"""Return the name of the entity."""
return self.mower_name
@property
def unique_id(self) -> str:
"""Return a unique identifier for this entity."""
return f"{self.mower_id}_dt"
@property
def source_type(self) -> str:
"""Return the source type, eg gps or router, of the device."""
return SOURCE_TYPE_GPS
@property
def latitude(self) -> float:
"""Return latitude value of the device."""
lat = self.__get_mower_attributes()["positions"][0]["latitude"]
return lat
@property
def longitude(self) -> float:
"""Return longitude value of the device."""
lon = self.__get_mower_attributes()["positions"][0]["longitude"]
return lon
| 33.402985 | 87 | 0.645666 | ["MIT"] | kalhimeo/husqvarna_automower | custom_components/husqvarna_automower/device_tracker.py | 2238 | Python |
import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portfolio.settings')
django.setup()
import base64
import tempfile
from django.test import TestCase, override_settings
from portfolio.portfolio_projects.forms import CommentForm, ProjectForm
from django.core.files.uploadedfile import InMemoryUploadedFile
from io import BytesIO
class TestForms(TestCase):
def test_comment_form_valid_data(self):
form = CommentForm({
'text': 'Text',
})
self.assertTrue(form.is_valid())
def test_comment_form_has_no_data(self):
form = CommentForm({
'text': '',
})
self.assertFalse(form.is_valid())
def test_project_form_has_no_data(self):
form = ProjectForm({})
self.assertFalse(form.is_valid())
self.assertEquals(len(form.errors), 4)
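    # MEDIA_ROOT is overridden so the uploaded test image lands in a temporary directory.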
@override_settings(MEDIA_ROOT=tempfile.gettempdir())
def test_project_form_valid_data(self):
image = InMemoryUploadedFile(
BytesIO(base64.b64decode(TEST_IMAGE)),
field_name='tempfile',
name='tempfile.png',
content_type='image/png',
size=len(TEST_IMAGE),
charset='utf-8',
)
form = ProjectForm({
'title': 'Title1',
'description': 'Description1',
'link': 'https://www.google.com/',
}, {
'image': image,
})
self.assertTrue(form.is_valid())
TEST_IMAGE = '''
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAAACXBI
WXMAAABIAAAASABGyWs+AAAACXZwQWcAAAAQAAAAEABcxq3DAAABfElEQVQ4y52TvUuCURTGf5Zg
9goR9AVlUZJ9KURuUkhIUEPQUIubRFtIJTk0NTkUFfgntAUt0eBSQwRKRFSYBYFl1GAt901eUYuw
QTLM1yLPds/zPD/uPYereYjHcwD+tQ3+Uys+LwCah3g851la/lf4qwKb61Sn3z5WFUWpCHB+GUGb
SCRIpVKqBkmSAMrqsViMqnIiwLx7HO/U+6+30GYyaVXBP1uHrfUAWvWMWiF4+qoOUJLJkubYcDs2
S03hvODSE7564ek5W+Kt+tloa9ax6v4OZ++jZO+jbM+pD7oE4HM1lX1vYNGoDhCyQMiCGacRm0Vf
EM+uiudjke6YcRoLfiELNB2dXTkAa08LPlcT2fpJAMxWZ1H4NnKITuwD4Nl6RMgCAE1DY3PuyyQZ
JLrNvZhMJgCmJwYB2A1eAHASDiFkQUr5Xn0RoJLSDg7ZCB0fVRQ29/TmP1Nf/0BFgL2dQH4LN9dR
7CMOaiXDn6FayYB9xMHeTgCz1cknd+WC3VgTorUAAAAldEVYdGNyZWF0ZS1kYXRlADIwMTAtMTIt
MjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2RpZnktZGF0ZQAyMDEwLTEyLTI2VDE0OjQ5
OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggolQTkcNChoKAAAADUlIRFIAAAAQAAAAEAgGAAAAH/P/
YQAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAASAAAAEgARslrPgAAAAl2cEFnAAAAEAAAABAA
XMatwwAAAhdJREFUOMuVk81LVFEYxn/3zocfqVebUbCyTLyYRYwD0cemCIRyUVToLloERUFBbYpo
E7WIFv0TLaP6C2Y17oYWWQxRMwo5OUplkR/XOefMuW8LNYyZLB94eOE5L79zzns4johIPp/n+YtX
fPn6jaq1bKaI65LY3sHohXOk02mcNxMT8vjJU5TWbEUN8Ti3bl4n0tLW/qBcniW0ltBaxFrsWl3P
7IZ8PdNa82m6RPTDxyLGmLq7JDuaqVQCllbqn6I4OUU0CJYJw7BmMR6LcPvyURbLGR49q/71KlGj
dV3AlbEhBnog3mo5e8Tycrz+cKPamBrAiUOdnD/ZhlFziKpw7RS8LVry01IDcI3WbHRXu8OdS524
pgx6BlkJEKW4PxrSFP2z12iNq1UFrTVaaxDNw6vttDXMg/2O2AXC5UUkWKI7vsDdM+Z3X9Ws2tXG
YLTCaMWNMY8DfREAFpcUkzPC1JzL8kKAGM3xvoDD+1uJVX+ilEIptTpECUP8PXEGB/rIzw/iNPXj
de1jML0Xay3l6QKfZyewP95x8dhr7r0HpSoAODt7dktoQ0SEpsZGent78f1+fN/H9/sxxlAoFCkU
CxQKRUqlEkppXNddBXTv2CXrtH/JofYVoqnUQbLZ8f/+A85aFWAolYJcLiee50ksFtuSm7e1SCaT
EUREcrmcnB4ZkWQyKZ7nbepEIiHDw8OSzWZFROQX6PpZFxAtS8IAAAAldEVYdGNyZWF0ZS1kYXRl
ADIwMTAtMTItMjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2RpZnktZGF0ZQAyMDEwLTEy
LTI2VDE0OjQ5OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggolQTkcNChoKAAAADUlIRFIAAAAQAAAA
EAgGAAAAH/P/YQAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAASAAAAEgARslrPgAAAAl2cEFn
AAAAEAAAABAAXMatwwAAAo9JREFUOMuNks1rVGcUxn/ve+9kUuOdfIzamNHEMK3RVILQQAuCWURo
rSAtbsV20T/EP6O7FtxkkYWQKK7F4Kb1C6yoSVrNdDIm1YTMjDP3vfc9p4ubZEYopQceDhwOD89z
zmO89/rw0SNu3b5D5a8q3gv7ZXa7dkY2sIwMf8w3X3/F9PTnhL/+9oCff7nBeq2GMYb/U5sbm1TX
a8TOEQwMHbq+vLKKqqIiiAh+r3tBvKBds72der1OtVolfP78BWmadmnNVKgqI0cOkiRtNrc9Zt9H
x9fK6iphs/keVflAoqpSHOzjh+8maL59yk83WzRa8G8OwzRxiHQIFOjJBXw7O8b0qV50K2H1tWf+
riCiHRbNFIUucYgoZu/Yqlz44iiXzh3EpJuE0uLKl57lNc/93wVjOyYyApeguwpElTOf9HH1YkSU
e0O72cC/b1DMK9/PGP5c97zaUGwXg01cjHMxcRwz0Cf8ePkAJ47U0eRvSLehtYM06pw+1OTauZje
wBG7mCTJEDqX3eCjvOXqxQGmTwXUmwlxmmdrpw+z0ybiHXnbYqasvDgbcGPJEvvsHKFzDp96Tgz3
cvjwMM/efsaBwZP0D39KabKEpgnbG3/wrvaU5psnHD/6mMF8jcqWwRgwpWOjKiLkQkOhv5+xsTLl
cpnR0WOUSiVEhLVKhbXXa7xcXqHyaoV6o0Hqd1MxUjqu7XYLMFkaNXtXYC09+R5UwbkYEcVaizFm
P/LWGsLJydMs3VvCWkP3gzxK7OKu7Bl81/tEhKmpKVhYWNCJiQkNglDDMKdhLpf1/0AQhDo+Pq5z
c3NKmqa6uLios7MXtFgsahRFGhUKHUS7KBQ0iiIdGhrS8+dndH5+XpMk0X8AMTVx/inpU4cAAAAl
dEVYdGNyZWF0ZS1kYXRlADIwMTAtMTItMjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2Rp
ZnktZGF0ZQAyMDEwLTEyLTI2VDE0OjQ5OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggg==
'''.strip()
| 45.12 | 76 | 0.843085 | ["MIT"] | Dafov/portfolio | tests/portfolio_projects/forms_test.py | 4512 | Python |
import pytest
import numpy as np
import pandas as pd
from skippa import columns
from skippa.transformers.sklearn import(
SkippaSimpleImputer,
SkippaStandardScaler,
SkippaMinMaxScaler,
SkippaOneHotEncoder,
SkippaLabelEncoder,
SkippaOrdinalEncoder,
SkippaPCA
)
from skippa.utils import get_dummy_data
def test_simpleimputer_float(test_data):
X, _ = test_data
col_spec = columns(dtype_include='float')
si = SkippaSimpleImputer(cols=col_spec, strategy='median')
res = si.fit_transform(X)
assert isinstance(res, pd.DataFrame)
subset = res[col_spec(res)]
assert subset.isna().sum().sum() == 0
def test_simpleimputer_int(test_data):
X, _ = test_data
col_spec = columns(dtype_include='int')
si = SkippaSimpleImputer(cols=col_spec, strategy='median')
res = si.fit_transform(X)
assert isinstance(res, pd.DataFrame)
subset = res[col_spec(res)]
assert subset.isna().sum().sum() == 0
def test_simpleimputer_char(test_data):
X, _ = test_data
col_spec = columns(dtype_include='object')
si = SkippaSimpleImputer(cols=col_spec, strategy='most_frequent')
res = si.fit_transform(X)
assert isinstance(res, pd.DataFrame)
subset = res[col_spec(X)]
assert subset.isna().sum().sum() == 0
def test_standardscaler():
X, _ = get_dummy_data(nchar=0, ndate=0, nrows=10)
ss = SkippaStandardScaler(cols=columns())
res = ss.fit_transform(X)
threshold = 0.01
assert (np.abs(0 - res.mean()) < threshold).all()
def test_minmaxscaler():
X, _ = get_dummy_data(nchar=0, ndate=0, nrows=10)
mms = SkippaMinMaxScaler(cols=columns())
res = mms.fit_transform(X)
threshold = 0.01
assert (np.abs(res.min() - 0.) < threshold).all()
assert (np.abs(res.max() - 1.) < threshold).all()
def test_onehotencoder():
X, _ = get_dummy_data(nrows=10, nfloat=0, nint=0, nchar=1, ndate=0)
ohe = SkippaOneHotEncoder(cols=columns())
res = ohe.fit_transform(X)
n_distinct_values = X.iloc[:, 0].nunique(dropna=False)
assert res.shape[1] == n_distinct_values
def test_pca():
n_components = 3
X, _ = get_dummy_data(nrows=100, nfloat=10, nint=0, nchar=1, ndate=0, missing=False)
pca = SkippaPCA(cols=columns(dtype_include='float'), n_components=n_components)
res = pca.fit_transform(X)
assert pca.n_components_ == n_components
assert res.shape[1] == n_components + 1
expected_columns = [f'c{i}' for i in range(n_components)]
assert all([c in res.columns for c in expected_columns])
| 30.60241 | 88 | 0.692913 | ["BSD-3-Clause"] | data-science-lab-amsterdam/skippa | tests/test_sklearn.py | 2540 | Python |
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Parsers.VCF import CollectionVCF
from MACE.Routines import StatsVCF, Visualization
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", action="store", dest="input", required=True,
help="Input vcf file with mutations.")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix",
required=True,
help="Prefix of output files")
parser.add_argument("-d", "--dpi", action="store", dest="dpi", type=int, default=200,
help="Dpi of figure")
parser.add_argument("-f", "--figsize", action="store", dest="figsize",
type=lambda s: map(int, s.split(",")),
default=(5, 5),
help="Size of figure in inches. X and Y values should be separated "
"by comma. Default: 5,5")
parser.add_argument("-e", "--output_formats", action="store", dest="output_formats",
type=lambda s: s.split(","),
default=["png"],
help="Comma-separated list of formats (supported by matlotlib) "
"of output figure.Default: png")
parser.add_argument("-l", "--title", action="store", dest="title",
default=None,
help="Title of figure. Default: not set")
parser.add_argument("-m", "--parsing_mode", action="store", dest="parsing_mode",
default="genotypes",
help="Parsing mode. Allowed: genotypes(default), 'coordinates_and_genotypes', 'complete'")
"""
parser.add_argument("-a", "--scaffold_white_list", action="store", dest="scaffold_white_list", default=[],
type=lambda s: s.split(","),
help="Comma-separated list of the only scaffolds to draw. Default: all")
parser.add_argument("-b", "--scaffold_black_list", action="store", dest="scaffold_black_list", default=[],
type=lambda s: s.split(","),
help="Comma-separated list of scaffolds to skip at drawing. Default: not set")
"""
args = parser.parse_args()
mutations = CollectionVCF(args.input, parsing_mode="genotypes")
StatsVCF.count_singletons(collection_vcf=mutations, output_prefix=args.output_prefix)
"""
Visualization.zygoty_bar_plot(StatsVCF.count_zygoty(mutations, outfile="%s.counts" % args.output_prefix),
args.output_prefix, extension_list=args.output_formats,
figsize=args.figsize,
dpi=args.dpi,
title=args.title)
""" | 51.903846 | 110 | 0.593183 | [
"Apache-2.0"
] | mahajrod/MACE | scripts/count_singletons.py | 2,699 | Python |
# _*_ coding: utf-8 _*_
__author__ = 'Di Meng'
__date__ = '1/3/2018 10:16 PM'
# _*_ coding: utf-8 _*_
__author__ = 'Di Meng'
__date__ = '1/3/2018 9:26 PM'
from tutorial.feature_functions import *
import pandas as pd
import plotly as py
import json
from plotly import tools
import plotly.graph_objs as go
#loading our data
df = pd.read_csv('EURUSD_hours.csv')
df.columns = ['date','open','high','low','close','volume']
df.date = pd.to_datetime(df.date,format='%d.%m.%Y %H:%M:%S.%f')
df = df.set_index(df.date)
df = df[['open','high','low','close','volume']]
df = df.drop_duplicates(keep=False)
df = df.iloc[:500]
#moving average
ma = df.close.rolling(center=False, window=30).mean()
# detrended = detrend(df, method='difference')
# f = fourier(df, [10, 15],method='difference')
#HA
# HAresults = candles(df, [1])
# HA = HAresults.candles[1]
# WADL (Williams accumulation/distribution line)
results = wadl(df, [15])
line = results.wadl[15]
print(line['close'])
# draw grarphs
trace = go.Ohlc(x=df.index, open=df.open, high=df.high, low=df.low, close=df.close, name='Currency Quote')
trace1 = go.Scatter(x=df.index, y=ma)
trace2 = go.Scatter(x=df.index, y=line.close)
# linear detrand plot
# trace2 = go.Scatter(x=df.index, y=detrended)
# difference detrand plot
# trace2 = go.Scatter(x=df.index, y=detrended)
data = [trace, trace1, trace2]
fig = tools.make_subplots(rows=2,cols=1,shared_xaxes=True)
fig.append_trace(trace,1,1)
fig.append_trace(trace1,1,1)
fig.append_trace(trace2,2,1)
py.offline.plot(fig, filename="test.html")
| 23.546875 | 106 | 0.696085 | ["MIT"] | leonsariel/python | finance/tutorial/tester.py | 1507 | Python |
#!/usr/bin/env python3
"""Run AFL repeatedly with externally supplied generated packet from STDIN."""
import logging
import sys
from ryu.controller import dpset
from faucet import faucet
from faucet import faucet_experimental_api
import afl
import fake_packet
ROUNDS = 1
logging.disable(logging.CRITICAL)
def main():
"""Run AFL repeatedly with externally supplied generated packet from STDIN."""
application = faucet.Faucet(
dpset=dpset.DPSet(),
faucet_experimental_api=faucet_experimental_api.FaucetExperimentalAPI())
application.start()
# make sure dps are running
if application.valves_manager is not None:
for valve in list(application.valves_manager.valves.values()):
state = valve.dp.dyn_finalized
valve.dp.dyn_finalized = False
valve.dp.running = True
valve.dp.dyn_finalized = state
while afl.loop(ROUNDS):
# receive input from afl
rcv = sys.stdin.read()
data = None
try:
data = bytearray.fromhex(rcv) # pytype: disable=missing-parameter
except (ValueError, TypeError):
continue
# create fake packet
_dp = fake_packet.Datapath(1)
msg = fake_packet.Message(datapath=_dp, cookie=15243729, port=1, data=data, in_port=1)
pkt = fake_packet.RyuEvent(msg)
# send fake packet to faucet
application.packet_in_handler(pkt)
if __name__ == "__main__":
main()
| 28.403846 | 94 | 0.67434 | ["Apache-2.0"] | 1ndochine/faucet | tests/fuzzer/fuzz_packet.py | 1477 | Python |
import os
from connexion import App
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
basedir = os.path.abspath(os.path.dirname(__file__))
conn = App(__name__, specification_dir='./')
app = conn.app
postgres_url = 'postgres://postgres:[email protected]:54320/web_service_db'
app.config["SQLALCHEMY_ECHO"] = True
app.config["SQLALCHEMY_DATABASE_URI"] = postgres_url
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["UPLOAD_FOLDER"] = basedir + os.sep + "web_service_files"
app.config["DATABASE"] = "web_service_db"
app.config["PORT"] = 5433
app.config["USERNAME"] = "postgres"
app.config["HOSTNAME"] = "10.5.95.65"
db = SQLAlchemy(app)
ma = Marshmallow(app)
| 25.464286 | 75 | 0.760168 | ["MIT"] | celinekeisja/capstone | web_service/config.py | 713 | Python |
import logging
import warnings
lcb_min_version_baseline = (2, 9, 0)
def get_lcb_min_version():
result = lcb_min_version_baseline
try:
# check the version listed in README.rst isn't greater than lcb_min_version
# bump it up to the specified version if it is
import docutils.parsers.rst
import docutils.utils
import docutils.frontend
parser = docutils.parsers.rst.Parser()
with open("README.rst") as README:
settings = docutils.frontend.OptionParser().get_default_values()
settings.update(
dict(tab_width=4, report_level=1, pep_references=False, rfc_references=False, syntax_highlight=False),
docutils.frontend.OptionParser())
document = docutils.utils.new_document(README.name, settings=settings)
parser.parse(README.read(), document)
readme_min_version = tuple(
map(int, document.substitution_defs.get("libcouchbase_version").astext().split('.')))
result = max(result, readme_min_version)
logging.info("min version is {}".format(result))
except Exception as e:
warnings.warn("problem: {}".format(e))
    return result
| 38.46875 | 118 | 0.656377 | ["Apache-2.0"] | griels/couchbase-python-client-ng | lcb_version.py | 1231 | Python |
# Generated by Django 2.2.5 on 2019-11-10 02:46
from django.db import migrations
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
dependencies = [
('main_site', '0014_auto_20191109_2038'),
]
operations = [
migrations.AlterField(
model_name='mushroomspecimen',
name='genus',
field=smart_selects.db_fields.ChainedForeignKey(chained_field='family', chained_model_field='family', default=0, on_delete=django.db.models.deletion.CASCADE, to='main_site.Genus', verbose_name='Género'),
),
migrations.AlterField(
model_name='plantspecimen',
name='genus',
field=smart_selects.db_fields.ChainedForeignKey(chained_field='family', chained_model_field='family', default=0, on_delete=django.db.models.deletion.CASCADE, to='main_site.Genus', verbose_name='Género'),
),
]
| 36.230769 | 215 | 0.687898 | ["MIT"] | Javen17/plants_api | plants_api/main_site/migrations/0015_auto_20191109_2046.py | 944 | Python |
from graphene import ObjectType, String, Schema
class ExampleQuery(ObjectType):
hello = String()
    def resolve_hello(self, info):
return "Hello"
class RootQuery(ExampleQuery, ObjectType):
pass
schema = Schema(query=RootQuery)
| 16.266667 | 47 | 0.717213 | ["MIT"] | jehalladay/React-Playground | demo/graphQLDemos/spacy/schema.py | 244 | Python |
import requests
import json
from datetime import datetime, timezone
from . utils import _extract_videos_necessary_details, _save_video_detils_in_db
from .models import ApiKeys
from . import config
def _get_api_key():  # Rotate keys: use the least-recently-used API key on each cron run (simple load balancing).
    new_key = ApiKeys.objects.all().order_by('last_used').first()
    _response = ApiKeys.objects.filter(
        api_key=new_key.api_key).update(last_used=datetime.now(timezone.utc))
return new_key.api_key
def get_recent_youtube_videos_details():
params = {**config.params}
params.update({'key': _get_api_key()})
print('Prameters: ', params)
youtube_api_response = requests.get(
config.YOUTUBE_SEARCH_URL, params=params)
print('Youtube API Response: ', youtube_api_response.text)
youtube_api_response = json.loads(youtube_api_response.text)
videos_details = _extract_videos_necessary_details(
youtube_api_response.get('items', []))
if videos_details:
_response = _save_video_detils_in_db(videos_details)
return videos_details
| 37.689655 | 101 | 0.755718 | ["MIT"] | aryamaan98/Youtube-Data-API-Integration | youtubeDataApi/searchApi/cron.py | 1093 | Python |