max_stars_repo_path
stringlengths 4
197
| max_stars_repo_name
stringlengths 6
120
| max_stars_count
int64 0
191k
| id
stringlengths 1
8
| content
stringlengths 6
964k
| score
float64 -0.88
3.95
| int_score
int64 0
4
|
---|---|---|---|---|---|---|
sensirion_shdlc_sensorbridge/commands/stop_repeated_transceive.py | Sensirion/python-shdlc-sensorbridge | 0 | 76804 | # -*- coding: utf-8 -*-
# (c) Copyright 2020 Sensirion AG, Switzerland
##############################################################################
##############################################################################
# _____ _ _ _______ _____ ____ _ _
# / ____| /\ | | | |__ __|_ _/ __ \| \ | |
# | | / \ | | | | | | | || | | | \| |
# | | / /\ \| | | | | | | || | | | . ` |
# | |____ / ____ \ |__| | | | _| || |__| | |\ |
# \_____/_/ \_\____/ |_| |_____\____/|_| \_|
#
# THIS FILE IS AUTOMATICALLY GENERATED AND MUST NOT BE EDITED MANUALLY!
#
# Generator: sensirion-shdlc-interface-generator 0.5.1
# Product: Sensor Bridge
# Version: 0.1.0
#
##############################################################################
##############################################################################
# flake8: noqa
from __future__ import absolute_import, division, print_function
from sensirion_shdlc_driver.command import ShdlcCommand
from struct import pack, unpack
import logging
log = logging.getLogger(__name__)
class SensorBridgeCmdStopRepeatedTransceiveBase(ShdlcCommand):
    """
    SHDLC command 0x51: "Stop Repeated Transceive".

    Base class that binds the SHDLC command ID; concrete subclasses supply
    the payload and response-timing parameters via ``*args``/``**kwargs``.
    """

    def __init__(self, *args, **kwargs):
        # Forward all arguments unchanged, fixing only the command ID (0x51).
        super(SensorBridgeCmdStopRepeatedTransceiveBase, self).__init__(
            0x51, *args, **kwargs)
class SensorBridgeCmdStopRepeatedTransceive(SensorBridgeCmdStopRepeatedTransceiveBase):

    def __init__(self, handle):
        """
        Stop Repeated Transceive Command

        Stops a repeated transceive operation.

        :param int handle:
            The handle of the repeated transceive which should be stopped. Pass
            0xFF to stop all repeated transceives.
        """
        super(SensorBridgeCmdStopRepeatedTransceive, self).__init__(
            # Payload: the handle as one big-endian unsigned byte.
            # NOTE(review): the b"".join() around a single pack() call is
            # redundant, but this file is generator output and is left as-is.
            data=b"".join([pack(">B", handle)]),
            max_response_time=0.05,
            post_processing_time=0.0,
            min_response_length=0,
            max_response_length=0
        )
| 0.435547 | 0 |
io/swig/io/vexport.py | ljktest/siconos | 137 | 76932 | <reponame>ljktest/siconos
#!/usr/bin/env @PYTHON_EXECUTABLE@
"""
Description: Export a Siconos mechanics-IO HDF5 file in VTK format.
"""
# Lighter imports before command line parsing
from __future__ import print_function
import sys
import os
import getopt
#
# a replacement for vview --vtk-export
#
def usage(long=False):
    """Print the command synopsis; with long=True also print option details."""
    prog = os.path.split(sys.argv[0])[1]
    print(__doc__)
    print()
    print('Usage: {0} [--help] [--version] [--ascii] <HDF5>'.format(prog))
    if not long:
        return
    print()
    print("""Options:
--help display this message
--version display version information
--ascii export file in ascii format
""")
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], '',
['help','version','ascii'])
except getopt.GetoptError as err:
sys.stderr.write('{0}\n'.format(str(err)))
usage()
exit(2)
ascii_mode = False
for o, a in opts:
if o == '--help':
usage(long=True)
exit(0)
if o == '--version':
print('{0} @SICONOS_VERSION@'.format(os.path.split(sys.argv[0])[1]))
exit(0)
if o in ('--ascii'):
ascii_mode = True
min_time = None
max_time = None
cf_scale_factor = 1
normalcone_ratio = 1
time_scale_factor = 1
vtk_export_mode = True
if len(args) > 0:
io_filename = args[0]
else:
usage()
exit(1)
# Heavier imports after command line parsing
import vtk
from vtk.util import numpy_support
from math import atan2, pi
import bisect
from numpy.linalg import norm
import numpy
import random
from siconos.io.mechanics_hdf5 import MechanicsHdf5
# attach velocity
# contact points and associated forces are embedded in on a PolyData source
class UnstructuredGridSource(vtk.vtkProgrammableSource):
    """vtkProgrammableSource specialization whose default output port is the
    unstructured-grid output rather than the poly-data one."""

    def GetOutputPort(self):
        # Port index 3 is the UnstructuredGridOutput of a
        # vtkProgrammableSource (ports 0-2 carry the other output types).
        return vtk.vtkProgrammableSource.GetOutputPort(self, 3)
class ConvexSource(UnstructuredGridSource):
    """Programmable source producing an unstructured grid holding a single
    convex cell (*convex*, a vtkConvexPointSet) over the given *points*."""

    def __init__(self, convex, points):
        self._convex = convex
        self._points = points
        self.SetExecuteMethod(self.method)

    def method(self):
        """Execute callback: emit the one convex cell and its points."""
        output = self.GetUnstructuredGridOutput()
        output.Allocate(1, 1)
        # BUG FIX: the original read the bare name ``convex`` here, which
        # resolved to whatever module-level ``convex`` happened to exist at
        # call time; the cell type must come from this instance's own set.
        output.InsertNextCell(
            self._convex.GetCellType(), self._convex.GetPointIds())
        output.SetPoints(self._points)
def add_compatiblity_methods(obj):
    """
    Add missing methods in previous VTK versions.

    Older VTK objects expose SetInput/AddInput while newer code calls
    SetInputData/AddInputData; alias the modern name onto the legacy
    method whenever the legacy one is present.
    """
    for legacy, modern in (('SetInput', 'SetInputData'),
                           ('AddInput', 'AddInputData')):
        if hasattr(obj, legacy):
            setattr(obj, modern, getattr(obj, legacy))
transforms = dict()
transformers = dict()
data_connectors_v = dict()
data_connectors_t = dict()
data_connectors_d = dict()
big_data_source = vtk.vtkMultiBlockDataGroupFilter()
add_compatiblity_methods(big_data_source)
big_data_writer = vtk.vtkXMLMultiBlockDataWriter()
add_compatiblity_methods(big_data_writer)
contactors = dict()
offsets = dict()
vtkmath = vtk.vtkMath()
class Quaternion():
    """Thin wrapper over vtkQuaternion[float] offering multiplication,
    conjugation, vector rotation and axis/angle extraction."""

    def __init__(self, *args):
        self._data = vtk.vtkQuaternion[float](*args)

    def __mul__(self, q):
        product = Quaternion()
        vtkmath.MultiplyQuaternion(self._data, q._data, product._data)
        return product

    def __getitem__(self, i):
        return self._data[i]

    def conjugate(self):
        conj = Quaternion((self[0], self[1], self[2], self[3]))
        conj._data.Conjugate()
        return conj

    def rotate(self, v):
        # Rotate v via q * (0, v) * conj(q); the scalar part is dropped.
        embedded = Quaternion((0, v[0], v[1], v[2]))
        rotated = self * embedded * self.conjugate()
        # assert(rotated[0] == 0)
        return [rotated[1], rotated[2], rotated[3]]

    def axisAngle(self):
        axis = [0, 0, 0]
        # vtk fills *axis* in place and returns the rotation angle.
        angle = self._data.GetRotationAngleAndAxis(axis)
        return axis, angle
def set_position(instance, q0, q1, q2, q3, q4, q5, q6):
    """Refresh every vtkTransform attached to *instance* from a body pose.

    (q0, q1, q2) is the body translation and (q3, q4, q5, q6) its
    orientation quaternion; each contactor offset (translation,
    orientation) stored in ``offsets[instance]`` is composed with the pose.
    Relies on the module-level ``transforms`` and ``offsets`` dicts.
    """
    q = Quaternion((q3, q4, q5, q6))
    for transform, offset in zip(transforms[instance], offsets[instance]):
        p = q.rotate(offset[0])
        r = q * Quaternion(offset[1])
        transform.Identity()
        transform.Translate(q0 + p[0], q1 + p[1], q2 + p[2])
        axis, angle = r.axisAngle()
        # vtk's RotateWXYZ expects degrees, axisAngle yields radians.
        transform.RotateWXYZ(angle * 180. / pi,
                             axis[0],
                             axis[1],
                             axis[2])


# Broadcastable variant applied column-wise to the position datasets.
set_positionv = numpy.vectorize(set_position)
def build_set_velocity(dico):
    """Return a numpy-vectorized setter that stores a 6-component velocity
    into the DataConnector registered in *dico* for an instance (if any)
    and refreshes its VTK pipeline."""
    def set_velocity(instance, v0, v1, v2, v3, v4, v5):
        connector = dico.get(instance)
        if connector is not None:
            connector._data[:] = [v0, v1, v2, v3, v4, v5]
            connector._connector.Update()
    return numpy.vectorize(set_velocity)
def build_set_translation(dico):
    """Return a numpy-vectorized setter writing a 3-component translation
    into the matching DataConnector and triggering its Update()."""
    def set_translation(instance, x0, x1, x2):
        try:
            connector = dico[instance]
        except KeyError:
            return
        connector._data[:] = [x0, x1, x2]
        connector._connector.Update()
    return numpy.vectorize(set_translation)
def build_set_displacement(dico):
    """Return a numpy-vectorized setter writing a 3-component displacement
    into the matching DataConnector and triggering its Update()."""
    def set_displacement(instance, x0, x1, x2):
        if instance not in dico:
            return
        target = dico[instance]
        target._data[:] = [x0, x1, x2]
        target._connector.Update()
    return numpy.vectorize(set_displacement)
def step_reader(step_string):
    """Build a vtkSTLReader for a STEP shape by meshing it to a temp STL.

    NOTE(review): the *step_string* parameter is never used; the STEP
    payload is re-read through the module-level ``io`` handle and the
    enclosing loop's ``shape_name`` variable -- verify against the call
    site before reusing this function.
    """
    # OCC imports are kept local so OCC is only required for STEP input.
    from OCC.StlAPI import StlAPI_Writer
    from OCC.STEPControl import STEPControl_Reader
    from OCC.BRep import BRep_Builder
    from OCC.TopoDS import TopoDS_Compound
    from OCC.IFSelect import IFSelect_RetDone, IFSelect_ItemsByEntity
    builder = BRep_Builder()
    comp = TopoDS_Compound()
    builder.MakeCompound(comp)
    stl_writer = StlAPI_Writer()
    stl_writer.SetASCIIMode(True)
    with io.tmpfile(contents=io.shapes()[shape_name][:][0]) as tmpfile:
        step_reader = STEPControl_Reader()
        status = step_reader.ReadFile(tmpfile[1])
        if status == IFSelect_RetDone:  # check status
            failsonly = False
            step_reader.PrintCheckLoad(failsonly, IFSelect_ItemsByEntity)
            step_reader.PrintCheckTransfer(failsonly, IFSelect_ItemsByEntity)
            ok = step_reader.TransferRoot(1)  # NOTE(review): result unused
            nbs = step_reader.NbShapes()
            l = []  # NOTE(review): never populated or used
            # Merge every transferred shape into one compound for export.
            for i in range(1, nbs + 1):
                shape = step_reader.Shape(i)
                builder.Add(comp, shape)
            # Write the compound as ASCII STL, then hand it to VTK.
            with io.tmpfile(suffix='.stl') as tmpf:
                stl_writer.Write(comp, tmpf[1])
                tmpf[0].flush()
                reader = vtk.vtkSTLReader()
                reader.SetFileName(tmpf[1])
                reader.Update()
                return reader
def brep_reader(brep_string, indx):
    """Build a vtkSTLReader for a serialized BRep shape via a temp STL file.

    :param brep_string: serialized BRepTools shape set.
    :param indx: index of the location to apply to the extracted shape.
    """
    from OCC.StlAPI import StlAPI_Writer
    from OCC.BRepTools import BRepTools_ShapeSet
    shape_set = BRepTools_ShapeSet()
    shape_set.ReadFromString(brep_string)
    # The shape of interest is the last one stored in the set.
    shape = shape_set.Shape(shape_set.NbShapes())
    location = shape_set.Locations().Location(indx)
    shape.Location(location)
    stl_writer = StlAPI_Writer()
    # NOTE(review): ``io`` here is the module-level MechanicsHdf5 handle,
    # not the stdlib io module -- only valid while that file is open.
    with io.tmpfile(suffix='.stl') as tmpf:
        stl_writer.Write(shape, tmpf[1])
        tmpf[0].flush()
        reader = vtk.vtkSTLReader()
        reader.SetFileName(tmpf[1])
        reader.Update()
        return reader
refs = []
refs_attrs = []
shape = dict()
pos = dict()
instances = dict()
with MechanicsHdf5(io_filename=io_filename, mode='r') as io:
def load():
ispos_data = io.static_data()
idpos_data = io.dynamic_data()
ivelo_data = io.velocities_data()
icf_data = io.contact_forces_data()[:]
isolv_data = io.solver_data()
return ispos_data, idpos_data, ivelo_data, icf_data, isolv_data
spos_data, dpos_data, velo_data, cf_data, solv_data = load()
    class DataConnector():
        """vtkProgrammableFilter wrapper that forwards its input unchanged
        while attaching a named field-data array (velocity/translation/...)
        whose values are refreshed from ``self._data`` on every Update()."""

        def __init__(self, instance, data_name='velocity', data_size=6):
            self._instance = instance
            self._data_name = data_name
            self._data_size = data_size
            self._connector = vtk.vtkProgrammableFilter()
            self._connector.SetExecuteMethod(self.method)
            # Staging buffer; the set_*v vectorized setters assign into it.
            self._data = numpy.zeros(data_size)
            self._vtk_data = vtk.vtkFloatArray()
            self._vtk_data.SetName(data_name)
            self._vtk_data.SetNumberOfComponents(data_size)
            self._vtk_data.SetNumberOfTuples(1)

        def method(self):
            """Execute callback: copy input through and refresh the array."""
            input = self._connector.GetInput()
            output = self._connector.GetOutput()
            output.ShallowCopy(input)
            # Attach the array only once; afterwards just rewrite its tuple.
            if output.GetFieldData().GetArray(self._data_name) is None:
                output.GetFieldData().AddArray(self._vtk_data)
            data = self._data
            data_t = tuple(data[0:self._data_size])
            output.GetFieldData().GetArray(self._data_name).SetTuple(
                0, data_t)
# contact forces provider
class ContactInfoSource():
def __init__(self, data):
self._data = None
if data is not None:
if len(data) > 0:
self._data = data
else:
self._data = None
if self._data is not None:
self._time = min(self._data[:, 0])
else:
self._time = 0
self._contact_source_a = vtk.vtkProgrammableSource()
self._contact_source_b = vtk.vtkProgrammableSource()
self._contact_source_a.SetExecuteMethod(self.method)
self._contact_source_b.SetExecuteMethod(self.method)
def method(self):
# multiblock += contact points
output_a = self._contact_source_a.GetPolyDataOutput()
output_b = self._contact_source_b.GetPolyDataOutput()
id_f = numpy.where(
abs(self._data[:, 0] - self._time) < 1e-15)[0]
self.cpa_export = self._data[
id_f, 2:5].copy()
self.cpb_export = self._data[
id_f, 5:8].copy()
self.cn_export = self._data[
id_f, 8:11].copy()
self.cf_export = self._data[
id_f, 11:14].copy()
self.cpa_ = numpy_support.numpy_to_vtk(
self.cpa_export)
self.cpa_.SetName('contact_positions_A')
self.cpb_ = numpy_support.numpy_to_vtk(
self.cpb_export)
self.cpb_.SetName('contact_positions_B')
self.cn_ = numpy_support.numpy_to_vtk(
self.cn_export)
self.cn_.SetName('contact_normals')
self.cf_ = numpy_support.numpy_to_vtk(
self.cf_export)
self.cf_.SetName('contact_forces')
output_a.Allocate(len(self.cpa_export), 1)
cpa_points = vtk.vtkPoints()
cpa_points.SetNumberOfPoints(len(self.cpa_export))
cpa_points.SetData(self.cpa_)
output_a.SetPoints(cpa_points)
# normal and forces are attached to A points
output_a.GetPointData().AddArray(self.cn_)
output_a.GetPointData().AddArray(self.cf_)
output_b.Allocate(len(self.cpb_export), 1)
cpb_points = vtk.vtkPoints()
cpb_points.SetNumberOfPoints(len(self.cpb_export))
cpb_points.SetData(self.cpb_)
output_b.SetPoints(cpb_points)
# Step 2
#
#
readers = dict()
vtk_reader = {'vtp': vtk.vtkXMLPolyDataReader,
'stl': vtk.vtkSTLReader}
for shape_name in io.shapes():
shape_type = io.shapes()[shape_name].attrs['type']
if shape_type in ['vtp', 'stl']:
with io.tmpfile() as tmpf:
tmpf[0].write(str(io.shapes()[shape_name][:][0]))
tmpf[0].flush()
reader = vtk_reader[shape_type]()
reader.SetFileName(tmpf[1])
reader.Update()
readers[shape_name] = reader
elif shape_type in ['brep']:
# try to find an associated shape
if 'associated_shape' in io.shapes()[shape_name].attrs:
associated_shape = \
io.shapes()[shape_name].\
attrs['associated_shape']
else:
if 'brep' in io.shapes()[shape_name].attrs:
brep = io.shapes()[shape_name].attrs['brep']
else:
brep = shape_name
reader = brep_reader(str(io.shapes()[brep][:][0]),
io.shapes()[brep].attrs['occ_indx'])
readers[shape_name] = reader
elif shape_type in ['stp', 'step']:
# try to find an associated shape
if 'associated_shape' in io.shapes()[shape_name].attrs:
associated_shape = \
io.shapes()[shape_name].\
attrs['associated_shape']
# delayed
else:
reader = step_reader(str(io.shapes()[shape_name][:]))
readers[shape_name] = reader
elif shape_type == 'convex':
# a convex shape
points = vtk.vtkPoints()
convex = vtk.vtkConvexPointSet()
data = io.shapes()[shape_name][:]
convex.GetPointIds().SetNumberOfIds(data.shape[0])
for id_, vertice in enumerate(io.shapes()[shape_name][:]):
points.InsertNextPoint(vertice[0], vertice[1], vertice[2])
convex.GetPointIds().SetId(id_, id_)
readers[shape_name] = ConvexSource(convex, points)
else:
assert shape_type == 'primitive'
primitive = io.shapes()[shape_name].attrs['primitive']
attrs = io.shapes()[shape_name][:][0]
if primitive == 'Sphere':
source = vtk.vtkSphereSource()
source.SetRadius(attrs[0])
elif primitive == 'Cone':
source = vtk.vtkConeSource()
source.SetRadius(attrs[0])
source.SetHeight(attrs[1])
source.SetResolution(15)
source.SetDirection(0, 1, 0) # needed
elif primitive == 'Cylinder':
source = vtk.vtkCylinderSource()
source.SetResolution(15)
source.SetRadius(attrs[0])
source.SetHeight(attrs[1])
# source.SetDirection(0,1,0)
elif primitive == 'Box':
source = vtk.vtkCubeSource()
source.SetXLength(attrs[0])
source.SetYLength(attrs[1])
source.SetZLength(attrs[2])
elif primitive == 'Capsule':
sphere1 = vtk.vtkSphereSource()
sphere1.SetRadius(attrs[0])
sphere1.SetCenter(0, attrs[1] / 2, 0)
sphere1.SetThetaResolution(15)
sphere1.SetPhiResolution(15)
sphere1.Update()
sphere2 = vtk.vtkSphereSource()
sphere2.SetRadius(attrs[0])
sphere2.SetCenter(0, -attrs[1] / 2, 0)
sphere2.SetThetaResolution(15)
sphere2.SetPhiResolution(15)
sphere2.Update()
cylinder = vtk.vtkCylinderSource()
cylinder.SetRadius(attrs[0])
cylinder.SetHeight(attrs[1])
cylinder.SetResolution(15)
cylinder.Update()
data = vtk.vtkMultiBlockDataSet()
data.SetNumberOfBlocks(3)
data.SetBlock(0, sphere1.GetOutput())
data.SetBlock(1, sphere2.GetOutput())
data.SetBlock(2, cylinder.GetOutput())
source = vtk.vtkMultiBlockDataGroupFilter()
add_compatiblity_methods(source)
source.AddInputData(data)
readers[shape_name] = source
for instance_name in io.instances():
instance = int(io.instances()[instance_name].attrs['id'])
contactors[instance] = []
transforms[instance] = []
offsets[instance] = []
for contactor_instance_name in io.instances()[instance_name]:
contactor_name = io.instances()[instance_name][
contactor_instance_name].attrs['name']
contactors[instance].append(contactor_name)
transform = vtk.vtkTransform()
transformer = vtk.vtkTransformFilter()
if contactor_name in readers:
transformer.SetInputConnection(
readers[contactor_name].GetOutputPort())
else:
print ('WARNING: cannot find a shape source for instance:',
instance)
transformer.SetTransform(transform)
transformers[contactor_name] = transformer
data_connectors_v[instance] = DataConnector(instance)
data_connectors_v[instance]._connector.SetInputConnection(
transformer.GetOutputPort())
data_connectors_v[instance]._connector.Update()
big_data_source.AddInputConnection(
data_connectors_v[instance]._connector.GetOutputPort())
data_connectors_t[instance] = DataConnector(instance, data_name='translation', data_size=3)
data_connectors_t[instance]._connector.SetInputConnection(
transformer.GetOutputPort())
data_connectors_t[instance]._connector.Update()
big_data_source.AddInputConnection(
data_connectors_t[instance]._connector.GetOutputPort())
data_connectors_d[instance] = DataConnector(instance, data_name='displacement', data_size=3)
data_connectors_d[instance]._connector.SetInputConnection(
transformer.GetOutputPort())
data_connectors_d[instance]._connector.Update()
big_data_source.AddInputConnection(
data_connectors_d[instance]._connector.GetOutputPort())
transforms[instance].append(transform)
offsets[instance].append(
(io.instances()[
instance_name][
contactor_instance_name].attrs['translation'],
io.instances()[instance_name][contactor_instance_name].attrs['orientation']))
pos_data = dpos_data[:].copy()
spos_data = spos_data[:].copy()
velo_data = velo_data[:].copy()
set_velocityv = build_set_velocity(data_connectors_v)
set_translationv = build_set_translation(data_connectors_t)
set_displacementv = build_set_displacement(data_connectors_d)
times = list(set(dpos_data[:, 0]))
times.sort()
contact_info_source = ContactInfoSource(cf_data)
pveloa = DataConnector(0)
pvelob = DataConnector(0)
pveloa._connector.SetInputConnection(
contact_info_source._contact_source_a.GetOutputPort())
pvelob._connector.SetInputConnection(
contact_info_source._contact_source_a.GetOutputPort())
big_data_source.AddInputConnection(
pveloa._connector.GetOutputPort())
big_data_source.AddInputConnection(
pvelob._connector.GetOutputPort())
big_data_writer.SetInputConnection(big_data_source.GetOutputPort())
ntime = len(times)
k=0
packet= int(ntime/100)+1
for time in times:
k=k+1
if (k%packet == 0):
sys.stdout.write('.')
index = bisect.bisect_left(times, time)
index = max(0, index)
index = min(index, len(times) - 1)
contact_info_source._time = times[index]
# fix: should be called by contact_source?
contact_info_source.method()
id_t = numpy.where(pos_data[:, 0] == times[index])
if numpy.shape(spos_data)[0] > 0:
set_positionv(spos_data[:, 1], spos_data[:, 2],
spos_data[:, 3],
spos_data[:, 4], spos_data[:, 5],
spos_data[:, 6],
spos_data[:, 7], spos_data[:, 8])
set_positionv(
pos_data[id_t, 1], pos_data[id_t, 2], pos_data[id_t, 3],
pos_data[id_t, 4], pos_data[id_t, 5], pos_data[id_t, 6],
pos_data[id_t, 7], pos_data[id_t, 8])
id_tv = numpy.where(velo_data[:, 0] == times[index])
set_velocityv(
velo_data[id_tv, 1],
velo_data[id_tv, 2],
velo_data[id_tv, 3],
velo_data[id_tv, 4],
velo_data[id_tv, 5],
velo_data[id_tv, 6],
velo_data[id_tv, 7])
set_translationv(
pos_data[id_t, 1],
pos_data[id_t, 2],
pos_data[id_t, 3],
pos_data[id_t, 4],
)
# set_displacementv(
# pos_data[id_t, 1],
# pos_data[id_t, 2]- pos_data[0, 2],
# pos_data[id_t, 3]- pos_data[0, 3],
# pos_data[id_t, 4]- pos_data[0, 4]
# ) # should be w.r.t initial position
big_data_writer.SetFileName('{0}-{1}.{2}'.format(os.path.splitext(
os.path.basename(io_filename))[0],
index, big_data_writer.GetDefaultFileExtension()))
big_data_writer.SetTimeStep(times[index])
big_data_source.Update()
if ascii_mode:
big_data_writer.SetDataModeToAscii()
big_data_writer.Write()
print(' ')
| 1.398438 | 1 |
umake/test.py | mlbo/cmake_examples | 84 | 77060 | <filename>umake/test.py<gh_stars>10-100
#!/usr/bin/env python
#coding: utf-8
from umake import CMake

# Equivalent "flat" API usage, kept for reference:
#cmake = CMake('3.15', 'hello')
#cmake.add_library('hello', ['src/hello.h', 'src/hello.cpp'])
#cmake.add_executable('demo', ['src/main.cpp'])
#cmake.target_link_libraries('demo', ['hello'])

# Target-object API: build a 'hello' library plus a 'demo' executable
# that links against it.
cmake = CMake('3.15', 'hello')

hello = CMake.Target.Library('hello')
hello.add_dep_files([
    'inc/hello.h',
    'src/hello.cpp'
])
hello.add_include_dir('inc', 'PUBLIC')

demo = CMake.Target.Executable('demo')
demo.add_dep_file(
    'test/main.cpp'
)
demo.add_include_dir('inc', 'PUBLIC')
demo.add_dep_lib('hello')

# External-package example, kept for reference:
#opencv_pkg = CMake.find_package('OpenCV')
#demo.add_dep_lib(opencv_pkg)

cmake.add_target(hello)
cmake.add_target(demo)
# Emit the generated CMake configuration (destination is decided by
# CMake.dump -- presumably stdout or CMakeLists.txt; confirm in umake).
cmake.dump()
| 1.164063 | 1 |
tests/unit/plugins/filter/test_foo.py | pedrohdz-scrap/ansible-collection-devenv | 0 | 77188 | from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from plugins.filter.normalize_pyvenv_apps import convert_string
def test_convert_string():
    """A bare string is normalized to a one-key mapping under 'name'."""
    expected = {'name': 'foo'}
    assert convert_string('foo') == expected
def test_foo():
    """Smoke test: the module imports and the test runner executes."""
    assert True
| 0.972656 | 1 |
test/test_cell.py | chasembowers/py-petridish | 0 | 77316 | <reponame>chasembowers/py-petridish
import unittest
from mock import MagicMock
from petridish.cell import BasicCell, Cell
from petridish.energized import Energized
from petridish.point import Point
from petridish.actor import Actor
from petridish.resource import Resource
class TestBasicCell(unittest.TestCase):
    """Delegation tests for BasicCell: every Cell operation must simply be
    forwarded to the wrapped Actor, Energized and Point collaborators."""

    # Threshold values used by the directional predicate tests below.
    _X_EQUALS = 52
    _Y_EQUALS = 78

    def setUp(self):
        # Real collaborator instances whose methods are replaced with
        # MagicMocks so each test can assert pure delegation.
        self._location = Point()
        self._actor = Actor()
        self._energized = Energized()
        self._cell = BasicCell(self._actor, self._energized, self._location)
        self._location.moveLeft = MagicMock()
        self._location.moveRight = MagicMock()
        self._location.moveUp = MagicMock()
        self._location.moveDown = MagicMock()
        self._location.moveTo = MagicMock()
        self._location.isLeftOf = MagicMock()
        self._location.isRightOf = MagicMock()
        self._location.isBelow = MagicMock()
        self._location.isAbove = MagicMock()
        self._actor.act = MagicMock()
        self._actor.child = MagicMock(return_value=Actor())
        self._energized.consumeEnergy = MagicMock()
        self._energized.releaseEnergy = MagicMock()
        self._energized.energy = MagicMock(return_value=23)
        self._energized.child = MagicMock(return_value=Energized())

    def test_moveLeft(self):
        self._cell.moveLeft()
        self._location.moveLeft.assert_called_with()

    def test_moveRight(self):
        self._cell.moveRight()
        self._location.moveRight.assert_called_with()

    def test_moveUp(self):
        self._cell.moveUp()
        self._location.moveUp.assert_called_with()

    def test_moveDown(self):
        self._cell.moveDown()
        self._location.moveDown.assert_called_with()

    def test_moveTo(self):
        coordinates = (8, 7)
        self._cell.moveTo(coordinates)
        self._location.moveTo.assert_called_with(coordinates)

    def test_isLeftOf(self):
        self._cell.isLeftOf(self._X_EQUALS)
        self._location.isLeftOf.assert_called_with(self._X_EQUALS)

    def test_isRightOf(self):
        self._cell.isRightOf(self._X_EQUALS)
        self._location.isRightOf.assert_called_with(self._X_EQUALS)

    def test_isBelow(self):
        self._cell.isBelow(self._Y_EQUALS)
        self._location.isBelow.assert_called_with(self._Y_EQUALS)

    def test_isAbove(self):
        self._cell.isAbove(self._Y_EQUALS)
        self._location.isAbove.assert_called_with(self._Y_EQUALS)

    def test_act(self):
        # The cell passes itself along with the world state to its actor.
        cells = [self._cell, Cell(), Cell()]
        resources = [Resource()]
        self._cell.act(cells, resources)
        self._actor.act.assert_called_with(self._cell, cells, resources)

    def test_consumeSomeEnergy(self):
        someEnergy = 37
        self._cell.consumeEnergy(someEnergy)
        self._energized.consumeEnergy.assert_called_with(someEnergy)

    def test_releaseSomeEnergy(self):
        someEnergy = 34
        self._cell.releaseEnergy(someEnergy)
        self._energized.releaseEnergy.assert_called_with(someEnergy)

    def test_getEnergy(self):
        assert self._cell.energy() == self._energized.energy()
        self._energized.energy.assert_called_with()

    def test_producesChild(self):
        # A child must be a fresh cell of the same type, cloned from the
        # collaborators' child() results, at the parent's coordinates but
        # with an independent location object.
        child = self._cell.child()
        assert type(child) == type(self._cell)
        assert child != self._cell
        self._energized.child.assert_called_with()
        self._actor.child.assert_called_with()
        assert child.coordinates() == self._cell.coordinates()
        assert child._location != self._cell._location
if __name__ == '__main__':
unittest.main()
| 1.617188 | 2 |
experimental/dang/esp32/script_test_nmpc_qpoases/test_nmpc_qpoases.py | mindThomas/acados | 322 | 77444 | #!/usr/bin/env python
# Tested with both Python 2.7.6 and Python 3.4.3
#
# This Python code collects the source code for testing acados
# on microcontrollers, putting all the necessary C files in
# one directory, and header files in the sub-directory include.
#
# The idea is that when compiling the testing code of acados for
# embedded platforms, when "make" does not fully function like
# on standard Linux platform, all the source code available in
# one directory would allow the compiler to process the code
# easier.
#
# To use for ESP32:
#
# Example usage:
# Assume the source directory of acados is: ~/acados
# The target folder to be created is: chen_nmpc_qpoases
# This command should be used:
# python test_nmpc_qpoases.py ~/acados chen_nmpc_qpoases
#
# Author: <NAME>
# Date: 2017.04.03
import sys
import os
import glob
from subprocess import call
from os.path import join
print('Running python script to grab chen_nmpc_qpoases...')
print(sys.version) # get python version, for debugging
if len(sys.argv)!= 3:
raise SyntaxError('This script needs exactly 2 arguments: \n \
test_nmpc_qpoases.py <acados_top_dir> <new_target_dir>\n \
Example:\n \
test_nmpc_qpoases.py ~/acados chen_nmpc_qpoases')
# 1. Bring all necessary files to one directory.
top_dir = str(sys.argv[1]).rstrip('/') # no trailing / in top_dir
target_dir = str(sys.argv[2]).rstrip('/') # no trailing / in target_dir
# List of file to collect
# Note: this hard-coded path doesnot work with Windows
workingcodefiles = [\
'examples/c/chen_nmpc_qpoases.c', \
'examples/c/Chen_model/chen_model.c', \
'acados/utils/print.c', \
'acados/utils/timing.c', \
'acados/ocp_qp/condensing.c', \
'acados/ocp_qp/condensing_helper_functions.c', \
'acados/ocp_qp/ocp_qp_condensing_qpoases.c', \
'acados/sim/sim_erk_integrator.c', \
'external/hpmpc/auxiliary/d_aux_extern_depend_lib4.c', \
'external/blasfeo/auxiliary/i_aux_extern_depend_lib.c', \
'external/qpOASES/src/Constraints.c', \
'external/qpOASES/src/Bounds.c', \
'external/qpOASES/src/Flipper.c', \
'external/qpOASES/src/Indexlist.c', \
'external/qpOASES/src/Matrices.c', \
'external/qpOASES/src/MessageHandling.c', \
'external/qpOASES/src/Options.c', \
'external/qpOASES/src/QProblem.c', \
'external/qpOASES/src/QProblemB.c', \
'external/qpOASES/src/Utils.c' \
]
workingheaderfiles = [\
'examples/c/Chen_model/chen_model.h', \
'acados/ocp_qp/ocp_qp_common.h', \
'acados/ocp_qp/condensing.h', \
'acados/ocp_qp/ocp_qp_condensing_qpoases.h', \
'acados/sim/sim_common.h', \
'acados/sim/sim_erk_integrator.h', \
'acados/sim/sim_collocation.h', \
'acados/sim/sim_rk_common.h', \
'acados/utils/print.h', \
'acados/utils/types.h', \
'acados/utils/timing.h', \
'external/hpmpc/include/aux_d.h', \
'external/hpmpc/include/block_size.h', \
'external/hpmpc/include/kernel_d_lib4.h', \
'external/blasfeo/include/blasfeo_i_aux.h', \
'external/qpOASES/include/qpOASES_e/Bounds.h', \
'external/qpOASES/include/qpOASES_e/Constants.h', \
'external/qpOASES/include/qpOASES_e/ConstraintProduct.h', \
'external/qpOASES/include/qpOASES_e/Constraints.h', \
'external/qpOASES/include/qpOASES_e/Flipper.h', \
'external/qpOASES/include/qpOASES_e/Indexlist.h', \
'external/qpOASES/include/qpOASES_e/Matrices.h', \
'external/qpOASES/include/qpOASES_e/MessageHandling.h', \
'external/qpOASES/include/qpOASES_e/Options.h', \
'external/qpOASES/include/qpOASES_e/QProblem.h', \
'external/qpOASES/include/qpOASES_e/QProblemB.h', \
'external/qpOASES/include/qpOASES_e/Utils.h' \
]
# Files that should be renamed to avoid conflicts
oldfiles = ['external/qpOASES/include/qpOASES_e/Types.h']
newfiles = ['include/qpOASES_e_Types.h']
# Create directory structure and copy files
if not os.path.exists(target_dir):
os.system('mkdir '+target_dir)
for filename in workingcodefiles:
os.system('cp '+top_dir+'/'+filename+' '+target_dir)
if not os.path.exists(target_dir+'/include'):
os.system('mkdir '+target_dir+'/include')
for filename in workingheaderfiles:
os.system('cp '+top_dir+'/'+filename+' '+target_dir+'/include/')
for kk in range(len(oldfiles)):
os.system('cp '+top_dir+'/'+oldfiles[kk]+' '+target_dir+'/'+newfiles[kk])
print('Step 1: Necessary files copied.')
# 2. Modify .h and .c files to adapt to the new code structure:
# List of texts to be replaced:
old_text = [\
'examples/c/Chen_model/chen_model.h', \
'acados/ocp_qp/condensing.h', \
'acados/ocp_qp/condensing_helper_functions.c', \
'acados/ocp_qp/ocp_qp_common.h', \
'acados/ocp_qp/ocp_qp_condensing_qpoases.h', \
'acados/ocp_qp/ocp_qp_hpmpc.h', \
'acados/sim/sim_common.h', \
'acados/sim/sim_erk_integrator.h', \
'acados/sim/sim_collocation.h', \
'acados/sim/sim_rk_common.h', \
'acados/utils/print.h', \
'acados/utils/timing.h', \
'acados/utils/types.h', \
'hpmpc/include/aux_d.h', \
'../include/block_size.h', \
'../include/kernel_d_lib4.h', \
'blasfeo/include/blasfeo_common.h', \
'blasfeo/include/blasfeo_i_aux.h', \
'qpOASES_e/Bounds.h', \
'qpOASES_e/Constants.h', \
'qpOASES_e/Constraints.h', \
'qpOASES_e/ConstraintProduct.h', \
'qpOASES_e/Flipper.h', \
'qpOASES_e/Indexlist.h', \
'qpOASES_e/Matrices.h', \
'qpOASES_e/MessageHandling.h', \
'qpOASES_e/Options.h', \
'qpOASES_e/QProblem.h', \
'qpOASES_e/QProblemB.h', \
'qpOASES_e/Types.h', \
'qpOASES_e/Utils.h' \
]
# List of new texts to replace old ones,
# in corresponding order to old_text:
new_text = [\
'chen_model.h', \
'condensing.h', \
'condensing_helper_functions.c', \
'ocp_qp_common.h', \
'ocp_qp_condensing_qpoases.h', \
'ocp_qp_hpmpc.h', \
'sim_common.h', \
'sim_erk_integrator.h', \
'sim_collocation.h', \
'sim_rk_common.h', \
'print.h', \
'timing.h', \
'types.h', \
'aux_d.h', \
'block_size.h', \
'kernel_d_lib4.h', \
'blasfeo_common.h', \
'blasfeo_i_aux.h', \
'Bounds.h', \
'Constants.h', \
'Constraints.h', \
'ConstraintProduct.h', \
'Flipper.h', \
'Indexlist.h', \
'Matrices.h', \
'MessageHandling.h', \
'Options.h', \
'QProblem.h', \
'QProblemB.h', \
'qpOASES_e_Types.h', \
'Utils.h' \
]
len_old_text = len(old_text)
len_new_text = len(new_text)
if len_old_text != len_new_text:
raise ValueError('Number of old and new texts not match')
def _apply_path_renames(pattern):
    """Rewrite every file matching *pattern* in place, applying each
    old_text[i] -> new_text[i] substitution (the include-path flattening
    defined above).  Extracted to remove the duplicated .c/.h loops and to
    close files reliably via ``with``."""
    for file in glob.glob(pattern):
        with open(file, "r") as objFile:
            txtFile = objFile.read()
        for replacetext in range(len_old_text):
            txtFile = txtFile.replace(old_text[replacetext], new_text[replacetext])
        with open(file, "w") as objFile:
            objFile.write(txtFile)

# Same substitution pass over the collected sources and headers.
_apply_path_renames(target_dir + "/*.c")
_apply_path_renames(target_dir + "/include/*.h")
print('Step 2: Path information in files modified to the new structure.')
# 3. Add specific code to HPMPC and BLASFEO files:
# List of files to be modified:
files = ['include/block_size.h']
# List of lines to be added in the beginning of files,
# in corresponding order with the list files:
lines = ['#include "target.h"\n']
if len(files) != len(lines):
raise ValueError('Number of files and added lines not match')
for kk in range(len(files)):
objFile = open(target_dir+'/'+files[kk], "r")
txtFile = objFile.read()
objFile.close()
objFile = open(target_dir+'/'+files[kk], "w")
objFile.write(lines[kk]) # write the line to the beginning
objFile.write(txtFile)
objFile.close()
print('Step 3: Common header file included in specific files.')
# 4. Copy Makefile and specific setting files
os.system('cp '+top_dir+'/experimental/dang/esp32/script_test_nmpc_qpoases/Makefile '+target_dir)
os.system('cp '+top_dir+'/experimental/dang/esp32/script_test_nmpc_qpoases/target.h '+target_dir+'/include/')
print('Step 4: Makefile, and HPMPC target.h replaced.')
# 5. Display further instructions
print('Please do next steps in terminal:')
print(' cd '+target_dir)
print(' make')
print('Then run the binary file in '+target_dir+'/bin')
print('To remove binary objects: make clean\n')
| 1.789063 | 2 |
messengerext/surveys/templatetags/app_filters.py | groupsome/groupsome | 6 | 77572 | <reponame>groupsome/groupsome
from django import template
register = template.Library()
def get_survey_user_voted_count(survey):
    """Return the number of distinct users that voted in *survey*.

    A user who voted for several choices is counted once.
    """
    # A set gives O(1) de-duplication; the original list-membership test
    # was O(n) per vote.  (Django model instances hash by primary key, so
    # set semantics match the previous equality-based check.)
    users = set()
    for choice in survey.choices.all():
        for vote in choice.votes.all():
            users.add(vote.user)
    return len(users)
def get_survey_votes_count(survey):
    """Total number of votes cast across all choices of *survey*."""
    return sum(choice.votes.all().count() for choice in survey.choices.all())


def get_choice_votes(choice):
    """Number of votes cast for a single *choice*."""
    votes = choice.votes.all()
    return votes.count()


def get_choice_percentage(choice):
    """Share of the survey's votes held by *choice*, formatted like "25%".

    Returns "0%" when the survey has no votes at all.
    """
    total = get_survey_votes_count(choice.survey)
    own = choice.votes.all().count()
    try:
        return "%d%%" % (float(own) / total * 100)
    except (ValueError, ZeroDivisionError):
        return "0%"
def get_user_voted(survey, request):
    """True if the requesting user voted for any choice of *survey*."""
    return any(
        choice.votes.all().filter(user=request.user)
        for choice in survey.choices.all()
    )
def get_user_voted_for_choice(choice, request):
    """True if the requesting user voted for this particular *choice*."""
    matching = choice.votes.all().filter(user=request.user)
    return bool(matching)
def get_users_from_choice(choice):
    """Distinct users that voted for *choice*, in first-vote order.

    De-duplication is equality-based (not hashing) to match the original
    list-membership behavior.
    """
    seen = []
    for vote in choice.votes.all():
        user = vote.user
        if user in seen:
            continue
        seen.append(user)
    return seen
def get_user_is_admin_for_survey(survey, user):
    """Return True if *user* is an admin of the group that owns *survey*."""
    # `in` already yields the True/False the original spelled out.
    return user in survey.group.admins.all()
# Expose the helpers above to Django templates under their filter names.
_FILTERS = (
    ('survey_user_voted_count', get_survey_user_voted_count),
    ('survey_votes_count', get_survey_votes_count),
    ('choice_percentage', get_choice_percentage),
    ('user_voted', get_user_voted),
    ('user_voted_for_choice', get_user_voted_for_choice),
    ('choice_votes', get_choice_votes),
    ('users_from_choice', get_users_from_choice),
    ('user_is_admin_for_survey', get_user_is_admin_for_survey),
)
for _name, _func in _FILTERS:
    register.filter(_name, _func)
| 1.828125 | 2 |
pgxnclient/commands/info.py | intgr/pgxnclient | 1 | 77700 | <gh_stars>1-10
"""
pgxnclient -- informative commands implementation
"""
# Copyright (C) 2011 <NAME>
# This file is part of the PGXN client
from pgxnclient.i18n import _, N_
from pgxnclient import SemVer
from pgxnclient.errors import NotFound, ResourceNotFound
from pgxnclient.commands import Command, WithSpec
import logging
logger = logging.getLogger('pgxnclient.commands')
class Mirror(Command):
    """``pgxn mirror`` -- report the available PGXN mirrors.

    Without arguments print the URI of every known mirror; with a URI
    argument (or ``--detailed``) print the full record for each mirror.
    """
    name = 'mirror'
    description = N_("return information about the available mirrors")
    @classmethod
    def customize_parser(self, parser, subparsers, **kwargs):
        # Register this command's sub-parser and its two options.
        subp = super(Mirror, self).customize_parser(
            parser, subparsers, **kwargs)
        subp.add_argument('uri', nargs='?', metavar="URI",
            help = _("return detailed info about this mirror."
                " If not specified return a list of mirror URIs"))
        subp.add_argument('--detailed', action="store_true",
            help = _("return full details for each mirror"))
        return subp
    def run(self):
        data = self.api.mirrors()
        if self.opts.uri:
            # A specific mirror was requested: always show full details
            # and fail loudly if the URI is unknown.
            detailed = True
            data = [ d for d in data if d['uri'] == self.opts.uri ]
            if not data:
                raise ResourceNotFound(
                    _('mirror not found: %s') % self.opts.uri)
        else:
            detailed = self.opts.detailed
        for i, d in enumerate(data):
            if not detailed:
                print d['uri']
            else:
                # Print known fields in a fixed order; missing ones are blank.
                for k in [
                    "uri", "frequency", "location", "bandwidth", "organization",
                    "email", "timezone", "src", "rsync", "notes",]:
                    print "%s: %s" % (k, d.get(k, ''))
                print
import re
import textwrap
import xml.sax.saxutils as saxutils
class Search(Command):
    """``pgxn search`` -- full-text search of the PGXN index."""
    name = 'search'
    description = N_("search in the available extensions")
    @classmethod
    def customize_parser(self, parser, subparsers, **kwargs):
        subp = super(Search, self).customize_parser(
            parser, subparsers, **kwargs)
        # --docs/--dist/--ext select one search domain; docs is the default.
        g = subp.add_mutually_exclusive_group()
        g.add_argument('--docs', dest='where', action='store_const',
            const='docs', default='docs',
            help=_("search in documentation [default]"))
        g.add_argument('--dist', dest='where', action='store_const',
            const="dists",
            help=_("search in distributions"))
        g.add_argument('--ext', dest='where', action='store_const',
            const='extensions',
            help=_("search in extensions"))
        subp.add_argument('query', metavar='TERM', nargs='+',
            help = _("a string to search"))
        return subp
    def run(self):
        # Print one paragraph per hit: "dist version" plus a wrapped excerpt.
        data = self.api.search(self.opts.where, self.opts.query)
        for hit in data['hits']:
            print "%s %s" % (hit['dist'], hit['version'])
            if 'excerpt' in hit:
                excerpt = self.clean_excerpt(hit['excerpt'])
                for line in textwrap.wrap(excerpt, 72):
                    print " " + line
                print
    def clean_excerpt(self, excerpt):
        """Clean up the excerpt returned in the json result for output."""
        # replace ellipsis with three dots, as there's no chance
        # to have them printed on non-utf8 consoles.
        # Also, they suck obscenely on fixed-width output.
        excerpt = excerpt.replace('…', '...')
        # TODO: this apparently misses a few entities
        excerpt = saxutils.unescape(excerpt)
        # NOTE(review): the next replace looks like a no-op; presumably an
        # HTML entity (e.g. &quot;) was lost in transcription -- confirm
        # against the upstream source.
        excerpt = excerpt.replace('"', '"')
        # Convert numerical entities
        excerpt = re.sub(r'\&\#(\d+)\;',
            lambda c: unichr(int(c.group(1))),
            excerpt)
        # Hilight found terms
        # TODO: use proper highlight with escape chars?
        excerpt = excerpt.replace('<strong></strong>', '')
        excerpt = excerpt.replace('<strong>', '*')
        excerpt = excerpt.replace('</strong>', '*')
        return excerpt
class Info(WithSpec, Command):
    """``pgxn info`` -- print information about a distribution.

    Depending on the flags, print the distribution details (default),
    its META.json, its README, or the list of available versions.
    """
    name = 'info'
    description = N_("print information about a distribution")
    @classmethod
    def customize_parser(self, parser, subparsers, **kwargs):
        subp = super(Info, self).customize_parser(
            parser, subparsers, **kwargs)
        # Mutually exclusive output selectors; stored in opts.what and
        # dispatched by name in run().
        g = subp.add_mutually_exclusive_group()
        g.add_argument('--details', dest='what',
            action='store_const', const='details', default='details',
            help=_("show details about the distribution [default]"))
        g.add_argument('--meta', dest='what',
            action='store_const', const='meta',
            help=_("show the distribution META.json"))
        g.add_argument('--readme', dest='what',
            action='store_const', const='readme',
            help=_("show the distribution README"))
        g.add_argument('--versions', dest='what',
            action='store_const', const='versions',
            help=_("show the list of available versions"))
        return subp
    def run(self):
        spec = self.get_spec()
        # Dispatch to print_details/print_meta/print_readme/print_versions.
        getattr(self, 'print_' + self.opts.what)(spec)
    def print_meta(self, spec):
        """Print the raw META.json of the best matching version."""
        data = self._get_dist_data(spec.name)
        ver = self.get_best_version(data, spec, quiet=True)
        print self.api.meta(spec.name, ver, as_json=False)
    def print_readme(self, spec):
        """Print the README of the best matching version."""
        data = self._get_dist_data(spec.name)
        ver = self.get_best_version(data, spec, quiet=True)
        print self.api.readme(spec.name, ver)
    def print_details(self, spec):
        """Print the main metadata fields as "key: value" lines."""
        data = self._get_dist_data(spec.name)
        ver = self.get_best_version(data, spec, quiet=True)
        data = self.api.meta(spec.name, ver)
        for k in [u'name', u'abstract', u'description', u'maintainer', u'license',
                u'release_status', u'version', u'date', u'sha1']:
            try:
                v = data[k]
            except KeyError:
                logger.warn(_("data key '%s' not found"), k)
                continue
            # A field may be a scalar, a list, or a sub-mapping.
            if isinstance(v, list):
                for vv in v:
                    print "%s: %s" % (k, vv)
            elif isinstance(v, dict):
                for kk, vv in v.iteritems():
                    print "%s: %s: %s" % (k, kk, vv)
            else:
                print "%s: %s" % (k, v)
        k = 'provides'
        for ext, dext in data[k].iteritems():
            print "%s: %s: %s" % (k, ext, dext['version'])
        k = 'prereqs'
        if k in data:
            for phase, rels in data[k].iteritems():
                for rel, pkgs in rels.iteritems():
                    for pkg, ver in pkgs.iteritems():
                        print "%s: %s: %s %s" % (phase, rel, pkg, ver)
    def print_versions(self, spec):
        """Print "name version status" for every release accepted by *spec*,
        newest first."""
        data = self._get_dist_data(spec.name)
        name = data['name']
        vs = [ (SemVer(d['version']), s)
            for s, ds in data['releases'].iteritems()
            for d in ds ]
        vs = [ (v, s) for v, s in vs if spec.accepted(v) ]
        vs.sort(reverse=True)
        for v, s in vs:
            print name, v, s
    def _get_dist_data(self, name):
        """Fetch distribution data; on failure hint at extensions with
        that name before re-raising the original NotFound."""
        try:
            return self.api.dist(name)
        except NotFound, e:
            # maybe the user was looking for an extension instead?
            try:
                ext = self.api.ext(name)
            except NotFound:
                pass
            else:
                vs = ext.get('versions', {})
                for extver, ds in vs.iteritems():
                    for d in ds:
                        if 'dist' not in d: continue
                        dist = d['dist']
                        distver = d.get('version', 'unknown')
                        logger.info(
                            _("extension %s %s found in distribution %s %s"),
                            name, extver, dist, distver)
            raise e
| 1.898438 | 2 |
katsdpscripts/test/test_session.py | ska-sa/katsdpscripts | 0 | 77828 | ###############################################################################
# SKA South Africa (http://ska.ac.za/) #
# Author: <EMAIL> #
# Copyright @ 2013 SKA SA. All rights reserved. #
# #
# THIS SOFTWARE MAY NOT BE COPIED OR DISTRIBUTED IN ANY FORM WITHOUT THE #
# WRITTEN PERMISSION OF SKA SA. #
###############################################################################
import unittest2 as unittest
from katcorelib.testutils import NameSpace
import katcorelib.rts_session as session
class Test_CaptureSessionBase(unittest.TestCase):
    """Tests for ``CaptureSessionBase.get_ant_names``."""

    def setUp(self):
        self.DUT = session.CaptureSessionBase()

    def _make_kat(self, ants, controlled):
        """Build a fake ``kat`` namespace exposing *ants* via katconfig
        and *controlled* as the controlled-objects list."""
        kat = NameSpace()
        kat.controlled_objects = controlled
        kat.__dict__['katconfig'] = NameSpace()
        kat.katconfig.__dict__['arrays'] = {}
        kat.katconfig.arrays = {'ants': ants}
        return kat

    def test_get_ant_names(self):
        # KAT-7 style antenna names.
        self.DUT.kat = self._make_kat(['ant1', 'ant2'],
                                      ['ant1', 'rfe7', 'ant2', 'katarchive'])
        self.assertEqual(self.DUT.get_ant_names(), 'ant1,ant2')

    def test_mkat_get_ant_names(self):
        # MeerKAT style antenna names.
        self.DUT.kat = self._make_kat(['m000', 'm063'],
                                      ['m000', 'rfe7', 'm063', 'katarchive'])
        self.assertEqual(self.DUT.get_ant_names(), 'm000,m063')
| 1.460938 | 1 |
Crawl library journal data/functions.py | luoagnes/financial-reports-analysis | 4 | 77956 | # -*- coding: utf-8 -*-
import time
from selenium import webdriver
from selenium.webdriver.support.select import Select
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def get_browser_driver():
    """Launch and return a local Chrome WebDriver instance."""
    # NOTE(review): the chromedriver path is hard-coded for a Windows
    # Python 2.7 install -- adjust per machine.
    driver= webdriver.Chrome(executable_path='C:\Python27\chromedriver.exe')
    print 'get the browser driver successfully!----------'
    return driver
def login(driver):
    """Log in to fenqubiao.com with the fixed account and return True."""
    driver.get('http://www.fenqubiao.com/')
    driver.find_element_by_id('Username').send_keys('szu')
    driver.find_element_by_id('Password').send_keys('<PASSWORD>')
    driver.find_element_by_id('login_button').click()
    print 'login successfully!------------'
    return True
def config_form(driver, i):
    """Select journal category *i* in the search form and submit it.

    The search button is clicked three times with waits in between --
    presumably to force the page to finish loading; confirm before
    simplifying.
    """
    Select(driver.find_element_by_id("ContentPlaceHolder1_dplCategoryType")).select_by_value('0') ### 0 is the big class, 1 is the small class
    Select(driver.find_element_by_id("ContentPlaceHolder1_dplCategory")).select_by_index(i)
    driver.find_element_by_id("ContentPlaceHolder1_btnSearch").click()
    driver.implicitly_wait(30)
    time.sleep(3)
    driver.find_element_by_id("ContentPlaceHolder1_btnSearch").click()
    time.sleep(2)
    driver.find_element_by_id("ContentPlaceHolder1_btnSearch").click()
    return True
### Scroll the page down to the bottom (drag the scrollbar to the end).
def scroll(driver):
    """Inject JS that smoothly scrolls the window to the bottom of the page.

    The script advances 100px every 50ms; when it reaches the end it
    appends ``scroll-done`` to the document title, so a caller could poll
    the title to detect completion.
    """
    driver.execute_script("""
    (function () {
        var y = document.body.scrollTop;
        var step = 100;
        window.scroll(0, y);
        function f() {
            if (y < document.body.scrollHeight) {
                y += step;
                window.scroll(0, y);
                setTimeout(f, 50);
            }
            else {
                window.scroll(0, y);
                document.title += "scroll-done";
            }
        }
        setTimeout(f, 1000);
    })();
    """)
| 1.585938 | 2 |
example/test/T3_fengche.py | Michael8968/skulpt | 2 | 78084 | <reponame>Michael8968/skulpt
import turtle

turtle.mode("logo")
turtle.shape("turtle")  # use a turtle-shaped pen
turtle.hideturtle()     # hide the pen while drawing
turtle.pencolor("orange")
turtle.pensize(4)


def draw_blade(size=100):
    """Draw one triangular windmill blade and rotate to the next slot.

    *size* is the edge length in pixels.  Three forward/right(120) moves
    trace an equilateral triangle; the extra right(120) turn positions
    the pen for the following blade -- exactly the command sequence the
    original repeated three times by hand.
    """
    for _ in range(3):
        turtle.forward(size)
        turtle.right(120)
    turtle.right(120)


# Three blades, 120 degrees apart.
for _ in range(3):
    draw_blade()

# Windmill handle: turn around and draw the shaft.
turtle.right(180)
turtle.forward(200)
turtle.done()
| 1.53125 | 2 |
tests/test_encryption.py | IABTechLab/uid2-client-python | 1 | 78212 | # Copyright (c) 2021 The Trade Desk, Inc
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import base64
import datetime as dt
import unittest
from Crypto.Cipher import AES
from uid2_client import decrypt_token, encrypt_data, decrypt_data, encryption_block_size, EncryptionError
from uid2_client.encryption import _encrypt_data_v1
from uid2_client.keys import *
# 32-byte AES secrets backing the master key (token layer) and the
# site key (identity layer) used throughout the tests.
_master_secret = bytes([139, 37, 241, 173, 18, 92, 36, 232, 165, 168, 23, 18, 38, 195, 123, 92, 160, 136, 185, 40, 91, 173, 165, 221, 168, 16, 169, 164, 38, 139, 8, 155])
_site_secret = bytes([32, 251, 7, 194, 132, 154, 250, 86, 202, 116, 104, 29, 131, 192, 139, 215, 48, 164, 11, 65, 226, 110, 167, 14, 108, 51, 254, 125, 65, 24, 23, 133])
# Key identifiers and the site id the site-level keys are scoped to.
_master_key_id = 164
_site_key_id = 165
_test_site_key_id = 166
_site_id = 2001
# Sample raw UID2 advertising id (base64) used as the token payload.
_uid2 = 'ywsvDNINiZOVSsfkHpLpSJzXzhr6Jx9Z/4Q0+lsEUvM='
_now = dt.datetime.utcnow()
# NOTE(review): "_now - dt.timedelta(days=-1)" is one day in the FUTURE;
# confirm whether "_now - dt.timedelta(days=1)" was intended here.
_master_key = EncryptionKey(_master_key_id, -1, _now - dt.timedelta(days=-1), _now, _now + dt.timedelta(days=1), _master_secret)
_site_key = EncryptionKey(_site_key_id, _site_id, _now - dt.timedelta(days=-1), _now, _now + dt.timedelta(days=1), _site_secret)
# Deterministic test key: fixed activation window, constant '9' secret.
_test_site_key = EncryptionKey(_test_site_key_id, _site_id, dt.datetime(2020, 1, 1, 0, 0, 0), dt.datetime(2021, 1, 1, 0, 0, 0), _now + dt.timedelta(days=1), encryption_block_size * b'9')
class TestEncryptionFunctions(unittest.TestCase):
    """Exercises decrypt_token, encrypt_data and decrypt_data against
    locally-built tokens and keys (no service round-trip)."""
    # --- decrypt_token ---------------------------------------------------
    def test_decrypt_token(self):
        # Round-trip: a token built from the raw UID2 decrypts back to it.
        token = _encrypt_token(_uid2, _master_key, _site_key)
        keys = EncryptionKeysCollection([_master_key, _site_key])
        result = decrypt_token(token, keys)
        self.assertEqual(_uid2, result.uid2)
    def test_decrypt_token_empty_keys(self):
        token = _encrypt_token(_uid2, _master_key, _site_key)
        keys = EncryptionKeysCollection([])
        with self.assertRaises(EncryptionError):
            result = decrypt_token(token, keys)
    def test_decrypt_token_no_master_key(self):
        token = _encrypt_token(_uid2, _master_key, _site_key)
        keys = EncryptionKeysCollection([_site_key])
        with self.assertRaises(EncryptionError):
            result = decrypt_token(token, keys)
    def test_decrypt_token_no_site_key(self):
        token = _encrypt_token(_uid2, _master_key, _site_key)
        keys = EncryptionKeysCollection([_master_key])
        with self.assertRaises(EncryptionError):
            result = decrypt_token(token, keys)
    def test_decrypt_token_invalid_version(self):
        token = _encrypt_token(_uid2, _master_key, _site_key, version=1)
        keys = EncryptionKeysCollection([_master_key, _site_key])
        with self.assertRaises(EncryptionError):
            result = decrypt_token(token, keys)
    def test_decrypt_token_expired(self):
        token = _encrypt_token(_uid2, _master_key, _site_key, expiry=dt.timedelta(seconds=-1))
        keys = EncryptionKeysCollection([_master_key, _site_key])
        with self.assertRaises(EncryptionError):
            result = decrypt_token(token, keys)
    def test_decrypt_token_custom_now(self):
        # Just past expiry must fail; just before must succeed.
        expiry = dt.datetime(2021, 3, 22, 9, 1, 2)
        token = _encrypt_token(_uid2, _master_key, _site_key, expiry=expiry)
        keys = EncryptionKeysCollection([_master_key, _site_key])
        with self.assertRaises(EncryptionError):
            result = decrypt_token(token, keys, now=expiry+dt.timedelta(seconds=1))
        result = decrypt_token(token, keys, now=expiry-dt.timedelta(seconds=1))
        self.assertEqual(_uid2, result.uid2)
    def test_decrypt_token_invalid_payload(self):
        # Truncating the base64 payload must be detected.
        token = _encrypt_token(_uid2, _master_key, _site_key, expiry=dt.timedelta(seconds=-1))
        keys = EncryptionKeysCollection([_master_key, _site_key])
        with self.assertRaises(EncryptionError):
            result = decrypt_token(token[:-3], keys)
    # --- encrypt_data ----------------------------------------------------
    def test_encrypt_data_specific_key_and_iv(self):
        data = b'123456'
        iv = encryption_block_size * b'0'
        key = _test_site_key
        keys = EncryptionKeysCollection([key])
        encrypted = encrypt_data(data, iv=iv, key=key)
        self.assertTrue(len(data) + len(iv) < len(encrypted))
        decrypted = decrypt_data(encrypted, keys)
        self.assertEqual(data, decrypted.data)
    def test_encrypt_data_specific_key_and_generated_iv(self):
        data = b'123456'
        key = _test_site_key
        keys = EncryptionKeysCollection([key])
        encrypted = encrypt_data(data, key=key)
        self.assertTrue(len(data) + encryption_block_size < len(encrypted))
        decrypted = decrypt_data(encrypted, keys)
        self.assertEqual(data, decrypted.data)
    def test_encrypt_data_specific_site_id(self):
        data = b'123456'
        key = _test_site_key
        keys = EncryptionKeysCollection([key])
        encrypted = encrypt_data(data, site_id=key.site_id, keys=keys)
        self.assertTrue(len(data) + encryption_block_size < len(encrypted))
        decrypted = decrypt_data(encrypted, keys)
        self.assertEqual(data, decrypted.data)
    def test_encrypt_data_site_id_from_token(self):
        # The site id may be inferred from an advertising token instead
        # of being passed explicitly.
        data = b'123456'
        key = _test_site_key
        keys = EncryptionKeysCollection([_master_key, key])
        token = _encrypt_token(_uid2, _master_key, key, site_id=key.site_id)
        encrypted = encrypt_data(data, advertising_token=token, keys=keys)
        self.assertTrue(len(data) + encryption_block_size < len(encrypted))
        decrypted = decrypt_data(encrypted, keys)
        self.assertEqual(data, decrypted.data)
    def test_encrypt_data_keys_and_specific_key_set(self):
        # key= and keys= are mutually exclusive.
        data = b'123456'
        key = _test_site_key
        keys = EncryptionKeysCollection([key])
        with self.assertRaises(ValueError):
            encrypt_data(data, key=key, keys=keys)
    def test_encrypt_data_site_id_and_token_set(self):
        # site_id= and advertising_token= are mutually exclusive.
        data = b'123456'
        key = _test_site_key
        keys = EncryptionKeysCollection([_master_key, key])
        token = _encrypt_token(_uid2, _master_key, key, site_id=key.site_id)
        with self.assertRaises(ValueError):
            encrypt_data(data, keys=keys, site_id=key.site_id, advertising_token=token)
    def test_encrypt_data_token_decrypt_failed(self):
        data = b'123456'
        key = _test_site_key
        keys = EncryptionKeysCollection([_master_key, _test_site_key])
        with self.assertRaises(EncryptionError):
            encrypt_data(data, keys=keys, advertising_token="bogus-token")
    def test_encrypt_data_key_expired(self):
        data = b'123456'
        site_id = 205
        key = EncryptionKey(101, site_id, _now - dt.timedelta(days=2), _now - dt.timedelta(days=2), _now - dt.timedelta(days=1), encryption_block_size * b'9')
        with self.assertRaises(EncryptionError):
            encrypt_data(data, key=key)
    def test_encrypt_data_key_inactive(self):
        data = b'123456'
        site_id = 205
        key = EncryptionKey(101, site_id, _now - dt.timedelta(days=2), _now + dt.timedelta(days=2), _now + dt.timedelta(days=3), encryption_block_size * b'9')
        with self.assertRaises(EncryptionError):
            encrypt_data(data, key=key)
    def test_encrypt_data_key_expired_custom_now(self):
        data = b'123456'
        key = _test_site_key
        now = _test_site_key.expires
        with self.assertRaises(EncryptionError):
            encrypt_data(data, key=key, now=now)
    def test_encrypt_data_key_inactive_custom_now(self):
        data = b'123456'
        key = _test_site_key
        now = _test_site_key.activates - dt.timedelta(seconds=1)
        with self.assertRaises(EncryptionError):
            encrypt_data(data, key=key, now=now)
    def test_encrypt_data_no_site_key(self):
        data = b'123456'
        keys = EncryptionKeysCollection([_master_key])
        with self.assertRaises(EncryptionError):
            encrypt_data(data, keys=keys, site_id=205)
    def test_encrypt_data_site_key_expired(self):
        data = b'123456'
        site_id = 205
        key = EncryptionKey(101, site_id, _now - dt.timedelta(days=2), _now - dt.timedelta(days=2), _now - dt.timedelta(days=1), encryption_block_size * b'9')
        keys = EncryptionKeysCollection([key])
        with self.assertRaises(EncryptionError):
            encrypt_data(data, keys=keys, site_id=site_id)
    def test_encrypt_data_site_key_inactive(self):
        data = b'123456'
        site_id = 205
        key = EncryptionKey(101, site_id, _now - dt.timedelta(days=2), _now + dt.timedelta(days=2), _now + dt.timedelta(days=3), encryption_block_size * b'9')
        keys = EncryptionKeysCollection([key])
        with self.assertRaises(EncryptionError):
            encrypt_data(data, keys=keys, site_id=site_id)
    def test_encrypt_data_site_key_expired_custom_now(self):
        # An expired key is still usable when `now` is inside its window.
        data = b'123456'
        site_id = 205
        now = dt.datetime.utcnow() - dt.timedelta(days=1)
        key = EncryptionKey(101, site_id, now, now, now + dt.timedelta(seconds=1), encryption_block_size * b'9')
        keys = EncryptionKeysCollection([key])
        encrypted = encrypt_data(data, site_id=site_id, keys=keys, now=now)
        decrypted = decrypt_data(encrypted, keys)
        self.assertEqual(data, decrypted.data)
        self.assertEqual(format_time(now), format_time(decrypted.encrypted_at))
    def test_encrypt_data_expired_token(self):
        data = b'123456'
        expiry = dt.datetime(2021, 3, 22, 9, 1, 2)
        key = _test_site_key
        keys = EncryptionKeysCollection([_master_key, key])
        token = _encrypt_token(_uid2, _master_key, key, site_id=key.site_id, expiry=expiry)
        with self.assertRaises(EncryptionError):
            encrypt_data(data, keys=keys, advertising_token=token)
    def test_encrypt_data_expired_token_custom_now(self):
        data = b'123456'
        expiry = dt.datetime(2021, 3, 22, 9, 1, 2)
        key = _test_site_key
        keys = EncryptionKeysCollection([_master_key, key])
        token = _encrypt_token(_uid2, _master_key, key, site_id=key.site_id, expiry=expiry)
        with self.assertRaises(EncryptionError):
            encrypt_data(data, keys=keys, advertising_token=token, now=expiry+dt.timedelta(seconds=1))
        now = expiry-dt.timedelta(seconds=1)
        encrypted = encrypt_data(data, keys=keys, advertising_token=token, now=now)
        decrypted = decrypt_data(encrypted, keys)
        self.assertEqual(data, decrypted.data)
        self.assertEqual(format_time(now), format_time(decrypted.encrypted_at))
    # --- decrypt_data ----------------------------------------------------
    def test_decrypt_data_bad_payload_type(self):
        # Corrupting the payload-type byte must be detected.
        data = b'123456'
        key = _test_site_key
        keys = EncryptionKeysCollection([key])
        encrypted = encrypt_data(data, key=key)
        encrypted_bytes = base64.b64decode(encrypted)
        encrypted = base64.b64encode(bytes([0]) + encrypted_bytes[1:]).decode('ascii')
        with self.assertRaises(EncryptionError):
            decrypt_data(encrypted, keys)
    def test_decrypt_data_bad_version(self):
        # Corrupting the version byte must be detected.
        data = b'123456'
        key = _test_site_key
        keys = EncryptionKeysCollection([key])
        encrypted = encrypt_data(data, key=key)
        encrypted_bytes = base64.b64decode(encrypted)
        encrypted = base64.b64encode(encrypted_bytes[0:1] + bytes([0]) + encrypted_bytes[2:]).decode('ascii')
        with self.assertRaises(EncryptionError):
            decrypt_data(encrypted, keys)
    def test_decrypt_data_bad_payload(self):
        # Both a lengthened and a truncated payload must be rejected.
        data = b'123456'
        key = _test_site_key
        keys = EncryptionKeysCollection([key])
        encrypted = encrypt_data(data, key=key)
        encrypted_bytes = base64.b64decode(encrypted)
        encrypted = base64.b64encode(encrypted_bytes + b'1').decode('ascii')
        with self.assertRaises(EncryptionError):
            decrypt_data(encrypted, keys)
        encrypted = base64.b64encode(encrypted_bytes[:-2]).decode('ascii')
        with self.assertRaises(EncryptionError):
            decrypt_data(encrypted, keys)
    def test_decrypt_data_no_decryption_key(self):
        data = b'123456'
        key = _test_site_key
        keys = EncryptionKeysCollection([key])
        encrypted = encrypt_data(data, key=key)
        dkeys = EncryptionKeysCollection([_master_key])
        with self.assertRaises(EncryptionError):
            decrypt_data(encrypted, dkeys)
def _encrypt_token(id_str, master_key, site_key, version=2, expiry=dt.timedelta(hours=1), site_id=0, privacy_bits=0):
    """Build a UID2 advertising token the way the service would.

    The identity block (site id, raw id, privacy bits, established-at
    timestamp) is encrypted with *site_key*; that payload plus the expiry
    is encrypted with *master_key*, prefixed with the version byte and
    base64-encoded.
    """
    id = bytes(id_str, 'utf-8')
    identity = int.to_bytes(site_id, 4, 'big')
    identity += int.to_bytes(len(id), 4, 'big')
    identity += id
    identity += int.to_bytes(privacy_bits, 4, 'big')
    # "established" timestamp: one hour in the past, in milliseconds.
    identity += int.to_bytes(int((dt.datetime.utcnow() - dt.timedelta(hours=1)).timestamp()) * 1000, 8, 'big')
    identity_iv = bytes([10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    # *expiry* may be an absolute datetime or a timedelta from now.
    if not isinstance(expiry, dt.datetime):
        expiry = dt.datetime.utcnow() + expiry
    master_payload = int.to_bytes(int(expiry.timestamp()) * 1000, 8, 'big')
    master_payload += _encrypt_data_v1(identity, key=site_key, iv=identity_iv)
    master_iv = bytes([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36])
    token = int.to_bytes(version, 1, 'big')
    token += _encrypt_data_v1(master_payload, key=master_key, iv=master_iv)
    return base64.b64encode(token).decode('ascii')
def format_time(t):
    """Format *t* as ``YYYY-MM-DD HH:MM:SS.mmm`` (millisecond precision)."""
    # strftime emits microseconds; dropping the last three digits
    # leaves milliseconds.
    return t.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
| 1.296875 | 1 |
master/utils-master/utils-master/.startup.py | AlexRogalskiy/DevArtifacts | 4 | 78340 | <reponame>AlexRogalskiy/DevArtifacts
# Python interactive-startup file that enables tab completion.
# Point the PYTHONSTARTUP environment variable at this file, e.g.:
#   export PYTHONSTARTUP=pythonstartup.py
import readline
import rlcompleter  # noqa: imported for its readline-completer side effect
readline.parse_and_bind('tab: complete')
# print() works on both Python 2 and 3; the original bare `print`
# statement is a syntax error under Python 3.
print("Auto complete is enabled")
| 0.921875 | 1 |
frontend/__init__.py | karllindmark/IsYourProjectUpToDate | 0 | 78468 | """ Module for the frontend, ie the UI and presentation layer """
| 0.28125 | 0 |
3084.py | ErFer7/URI-Python | 1 | 78596 | # -*- coding: utf-8 -*-
# URI 3084: convert hour/minute hand angles (degrees) into HH:MM.
while True:
    try:
        hm = list(map(float, input().split()))
        # Map 360 degrees onto 12 hours and 60 minutes respectively;
        # int() truncates toward zero.
        h = int((hm[0] / 360) * 12)
        m = int((hm[1] / 360) * 60)
        if m == 60:
            # A full 360-degree minute hand wraps back to 0.
            m = 0
        print("{:02d}:{:02d}".format(h, m))
    except (EOFError, IndexError):
        # Stop when stdin is exhausted (or a blank line yields no fields).
        break
networking_huawei/tests/unit/drivers/ac/client/test_restclient.py | libuparayil/networking-huawei | 2 | 78724 | <filename>networking_huawei/tests/unit/drivers/ac/client/test_restclient.py
# Copyright (c) 2016 Huawei Technologies India Pvt Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslotest import base
import networking_huawei.drivers.ac.client.restclient as ac_rest
from networking_huawei.drivers.ac.common import config # noqa
# Canned neutron "create network" request body shared by the test cases.
test_create_network_req = {'network':
                           {'routerExternal': False,
                            'networkType': 'local',
                            'segmentationId': None,
                            'adminStateUp': True,
                            'tenant_id': 'test-tenant',
                            'name': 'net1',
                            'physicalNetwork': None,
                            'serviceName': 'physnet1',
                            'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                            'status': 'ACTIVE',
                            'shared': False}}
class HuaweiACRestClientTestCase(base.BaseTestCase):
    """Unit tests for the Huawei Agile Controller REST client.

    The three ``process_request`` tests previously each re-declared the
    same network dict, headers, auth tuple and expected-call kwargs; that
    duplication is factored into ``_run_process_request`` and
    ``_check_request_call`` (the shared request body is the module-level
    ``test_create_network_req``, which carries the same fields).
    """

    def setUp(self):
        # Point the driver at a local dummy controller for every test.
        cfg.CONF.set_override('username', 'huawei_user', 'huawei_ac_config')
        cfg.CONF.set_override('password', '<PASSWORD>', 'huawei_ac_config')
        cfg.CONF.set_override('neutron_ip', '127.0.0.1', 'huawei_ac_config')
        cfg.CONF.set_override('neutron_name', 'NS_1', 'huawei_ac_config')
        super(HuaweiACRestClientTestCase, self).setUp()
        self.restc = ac_rest.RestClient
        self.host = cfg.CONF.huawei_ac_config.host
        self.port = cfg.CONF.huawei_ac_config.port
        self.url = '%s%s%s%s' % ("http://", self.host, ":", str(self.port))

    def _mock_req_resp(self, status_code):
        """Return a mock HTTP response with *status_code* and an OK body."""
        response = mock.Mock()
        response.response = "OK"
        response.status_code = status_code
        response.errorcode = 0
        response.content = jsonutils.dumps(
            {'result': "ok", 'errorCode': '0', 'errorMsg': None}, indent=2)
        return response

    def test_rc_send_timeout(self):
        """A failure string from process_request maps to status -1."""
        methodname = 'POST'
        url = '/controller/dc/esdk/v2.0/test_url'
        expected_ret = {'errorCode': None, 'reason': None,
                        'response': None, 'status': -1}
        with mock.patch.object(self.restc, 'process_request',
                               return_value="Timeout Exceptions"):
            ret = ac_rest.RestClient().send(self.host, self.port,
                                            methodname, url, hex(10), {})
            self.assertEqual(expected_ret, ret, "Not expected return")

    def test_rc_send_success(self):
        """A 204 response is unpacked into errorCode/response/status."""
        methodname = 'POST'
        url = '/controller/dc/esdk/v2.0/test_url'
        expected_resp = {'errorCode': u'0', 'reason': None,
                         'response': 'ok', 'status': 204}
        with mock.patch.object(self.restc,
                               'process_request',
                               return_value=self._mock_req_resp
                               (requests.codes.no_content)):
            ret = ac_rest.RestClient().send(self.host, self.port,
                                            methodname, url,
                                            hex(10),
                                            test_create_network_req)
            self.assertEqual(expected_resp, ret, "Not expected response")

    def test_rc_send_del_network(self):
        """An empty-body 200 response yields no errorCode/response."""
        methodname = 'DELETE'
        url = '/controller/dc/esdk/v2.0/test_url'
        expected_resp = {'errorCode': None, 'reason': None,
                         'response': None, 'status': 200}
        resp = self._mock_req_resp(requests.codes.ok)
        resp.content = ""
        with mock.patch.object(self.restc, 'process_request',
                               return_value=resp):
            ret = ac_rest.RestClient().send(self.host, self.port,
                                            methodname, url,
                                            hex(10),
                                            test_create_network_req)
            self.assertEqual(expected_resp, ret, "Not expected response")

    def test_rc_send_del_network_resp_valid(self):
        """A non-2xx status (300) is passed through without a body parse."""
        methodname = 'DELETE'
        url = '/controller/dc/esdk/v2.0/test_url'
        expected_resp = {'errorCode': None, 'reason': None,
                         'response': None, 'status': 300}
        resp = self._mock_req_resp(requests.codes.multiple_choices)
        with mock.patch.object(self.restc, 'process_request',
                               return_value=resp):
            ret = ac_rest.RestClient().send(self.host, self.port,
                                            methodname, url,
                                            hex(10),
                                            test_create_network_req)
            self.assertEqual(expected_resp, ret, "Not expected response")

    def _run_process_request(self, side_effect=None):
        """Invoke ``process_request`` with canned DELETE arguments.

        ``requests.request`` is patched; when *side_effect* is given the
        patched call raises it (process_request is expected to swallow
        the error).  Returns the patched mock for assertions.
        """
        auth = (cfg.CONF.huawei_ac_config.username,
                cfg.CONF.huawei_ac_config.password)
        headers = {'Accept': 'application/json',
                   'Content-type': 'application/json'}
        resp = self._mock_req_resp(requests.codes.no_content)
        with mock.patch('requests.request',
                        return_value=resp) as mock_method:
            if side_effect is not None:
                mock_method.side_effect = side_effect
            ac_rest.RestClient().process_request(
                'DELETE', auth, '/controller/dc/esdk/v2.0/test_url',
                headers, test_create_network_req)
        return mock_method

    def _check_request_call(self, mock_method, once=False):
        """Verify ``requests.request`` received the expected arguments."""
        expected = dict(
            headers={'Content-type': 'application/json',
                     'Accept': 'application/json'},
            timeout=float(cfg.CONF.huawei_ac_config.request_timeout),
            verify=False,
            auth=(cfg.CONF.huawei_ac_config.username,
                  cfg.CONF.huawei_ac_config.password),
            url='/controller/dc/esdk/v2.0/test_url',
            data=test_create_network_req)
        if once:
            mock_method.assert_called_once_with('DELETE', **expected)
        else:
            mock_method.assert_any_call('DELETE', **expected)

    def test_rc_process_request(self):
        """Exactly one correctly-formed request is issued."""
        mock_method = self._run_process_request()
        self._check_request_call(mock_method, once=True)

    def test_rc_process_request_timeout_exception(self):
        """A requests Timeout is handled inside process_request."""
        mock_method = self._run_process_request(
            side_effect=requests.exceptions.Timeout(
                mock.Mock(msg="Timeout Exceptions")))
        self._check_request_call(mock_method)

    def test_rc_process_request_exception(self):
        """A generic exception from requests is handled inside process_request."""
        mock_method = self._run_process_request(
            side_effect=Exception(mock.Mock(msg="Timeout Exceptions")))
        self._check_request_call(mock_method)
| 1.25 | 1 |
MTS/word/HeaderWord.py | ohhorob/pyMTS | 1 | 78852 | import ctypes
c_uint8 = ctypes.c_uint8
class HeaderWord(ctypes.BigEndianStructure):
    """Big-endian MTS header word.

    The high byte carries marker/flag bits (see MAGIC_MASK = 0xA280, i.e.
    bits 15, 13, 9 and 7 set); the payload length is a 9-bit value split
    across ``LengthHigh`` (bit 8) and ``LengthLow`` (bits 6..0).
    """

    MAGIC_MASK = 0xA280

    _fields_ = [
        # Low byte
        ('CLEAR07', c_uint8, 1),         # bit 07
        ('LengthLow', c_uint8, 7),       # bits 06..00
        # High byte
        ('HEADER15', c_uint8, 1),        # bit 15
        ('Recording', c_uint8, 1),       # bit 14
        ('CLEAR13', c_uint8, 1),         # bit 13
        ('DataOrResponse', c_uint8, 1),  # bit 12
        ('LogCapable', c_uint8, 1),      # bit 11
        ('RESERVED10', c_uint8, 1),      # bit 10
        ('CLEAR09', c_uint8, 1),         # bit 09
        ('LengthHigh', c_uint8, 1),      # bit 08
    ]

    def is_valid(self):
        """Return True when all mandatory marker bits are set.

        Raises ValueError naming the first unset marker bit otherwise.
        """
        required = (
            (self.HEADER15, 'Header start marker (15) not set.'),
            (self.CLEAR13, '(13) not set'),
            (self.CLEAR09, '(09) not set'),
            (self.CLEAR07, '(07) not set'),
        )
        for bit, message in required:
            if bit == 0:
                raise ValueError(message)
        return True

    def is_data(self):
        """True when the word announces a data packet."""
        return self.DataOrResponse != 0

    def is_response(self):
        """True when the word announces a command response."""
        return self.DataOrResponse == 0

    def can_log(self):
        """True when the sender is log-capable."""
        return self.LogCapable != 0

    def is_recording(self):
        """True when the sender is currently recording."""
        return self.Recording != 0

    def length(self):
        """Reassemble the 9-bit payload length from its two fields."""
        return self.LengthLow | (self.LengthHigh << 7)
| 1.671875 | 2 |
seahub/organizations/views.py | samuelduann/seahub | 420 | 78980 | # Copyright (c) 2012-2016 Seafile Ltd.
# encoding: utf-8
import logging
import json
from urllib.parse import urlparse
from django.conf import settings
from django.contrib import messages
from django.urls import reverse
from django.core.cache import cache
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render
from django.utils.crypto import get_random_string
import seaserv
from seaserv import ccnet_api
from seahub.auth import login
from seahub.auth.decorators import login_required, login_required_ajax
from seahub.base.accounts import User
from seahub.group.views import remove_group_common
from seahub.profile.models import Profile
from seahub.utils import get_service_url, render_error
from seahub.utils.auth import get_login_bg_image_path
from seahub.organizations.signals import org_created
from seahub.organizations.decorators import org_staff_required
from seahub.organizations.forms import OrgRegistrationForm
from seahub.organizations.settings import ORG_AUTO_URL_PREFIX, \
ORG_MEMBER_QUOTA_ENABLED, ORG_ENABLE_ADMIN_INVITE_USER
from seahub.organizations.utils import get_or_create_invitation_link
# Get an instance of a logger
logger = logging.getLogger(__name__)
########## ccnet rpc wrapper
def create_org(org_name, url_prefix, creator):
    """Create a new organization owned by ``creator``."""
    return seaserv.create_org(org_name, url_prefix, creator)
def count_orgs():
    """Return the total number of organizations."""
    return seaserv.ccnet_threaded_rpc.count_orgs()
def get_org_by_url_prefix(url_prefix):
    """Return the org whose url prefix is ``url_prefix``, or None."""
    return seaserv.ccnet_threaded_rpc.get_org_by_url_prefix(url_prefix)
def set_org_user(org_id, username, is_staff=False):
    """Add ``username`` to org ``org_id``; optionally as staff."""
    return seaserv.ccnet_threaded_rpc.add_org_user(org_id, username,
                                                   int(is_staff))
def unset_org_user(org_id, username):
    """Remove ``username`` from org ``org_id``."""
    return seaserv.ccnet_threaded_rpc.remove_org_user(org_id, username)
def org_user_exists(org_id, username):
    """True when ``username`` is a member of org ``org_id``."""
    return seaserv.ccnet_threaded_rpc.org_user_exists(org_id, username)
def get_org_groups(org_id, start, limit):
    """Return a slice of the org's groups (pagination via start/limit)."""
    return seaserv.ccnet_threaded_rpc.get_org_groups(org_id, start, limit)
def get_org_id_by_group(group_id):
    """Return the id of the org owning ``group_id``."""
    return seaserv.ccnet_threaded_rpc.get_org_id_by_group(group_id)
def remove_org_group(org_id, group_id, username):
    """Delete a group: common group teardown first, then the org mapping."""
    remove_group_common(group_id, username)
    seaserv.ccnet_threaded_rpc.remove_org_group(org_id, group_id)
def is_org_staff(org_id, username):
    """True when ``username`` is staff of org ``org_id``."""
    return seaserv.ccnet_threaded_rpc.is_org_staff(org_id, username)
def set_org_staff(org_id, username):
    """Grant org staff role to ``username``."""
    return seaserv.ccnet_threaded_rpc.set_org_staff(org_id, username)
def unset_org_staff(org_id, username):
    """Revoke org staff role from ``username``."""
    return seaserv.ccnet_threaded_rpc.unset_org_staff(org_id, username)
########## seafile rpc wrapper
def get_org_user_self_usage(org_id, username):
    """
    Arguments:
    - `org_id`:
    - `username`:
    """
    # Storage usage of ``username`` within org ``org_id``.
    return seaserv.seafserv_threaded_rpc.get_org_user_quota_usage(org_id, username)
def get_org_user_quota(org_id, username):
    """Return the storage quota assigned to ``username`` in the org."""
    return seaserv.seafserv_threaded_rpc.get_org_user_quota(org_id, username)
def get_org_quota(org_id):
    """Return the storage quota of the whole organization."""
    return seaserv.seafserv_threaded_rpc.get_org_quota(org_id)
def is_org_repo(org_id, repo_id):
    """Return True when repo ``repo_id`` belongs to organization ``org_id``."""
    # Direct comparison; the former ``True if ... else False`` was redundant.
    return seaserv.seafserv_threaded_rpc.get_org_id_by_repo_id(repo_id) == org_id
########## views
@login_required_ajax
def org_add(request):
    """Handle ajax request to add org, and create org owner.

    Staff-only, POST-only; returns a JSON response in every branch.

    Arguments:
    - `request`:
    """
    if not request.user.is_staff or request.method != 'POST':
        raise Http404
    content_type = 'application/json; charset=utf-8'
    url_prefix = gen_org_url_prefix(3)
    post_data = request.POST.copy()
    post_data['url_prefix'] = url_prefix
    form = OrgRegistrationForm(post_data)
    if form.is_valid():
        email = form.cleaned_data['email']
        # NOTE(review): field name restored from an anonymized placeholder;
        # confirm it matches the password field declared on OrgRegistrationForm.
        password = form.cleaned_data['password1']
        org_name = form.cleaned_data['org_name']
        url_prefix = form.cleaned_data['url_prefix']
        try:
            new_user = User.objects.create_user(email, password,
                                                is_staff=False, is_active=True)
        except User.DoesNotExist as e:
            logger.error(e)
            err_msg = 'Fail to create organization owner %s.' % email
            return HttpResponse(json.dumps({'error': err_msg}),
                                status=403, content_type=content_type)
        create_org(org_name, url_prefix, new_user.username)
        return HttpResponse(json.dumps({'success': True}),
                            content_type=content_type)
    else:
        # Surface the first validation error to the caller.
        try:
            err_msg = list(form.errors.values())[0][0]
        except IndexError:
            err_msg = list(form.errors.values())[0]
        return HttpResponse(json.dumps({'error': str(err_msg)}),
                            status=400, content_type=content_type)
def gen_org_url_prefix(max_trial=None):
    """Generate organization url prefix automatically.
    If ``max_trial`` is large than 0, then re-try that times if failed.
    Arguments:
    - `max_trial`:
    Returns:
    Url prefix if succed, otherwise, ``None``.
    """
    def _gen_prefix():
        # One random candidate; None when it collides with an existing org.
        url_prefix = 'org_' + get_random_string(
            6, allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789')
        if get_org_by_url_prefix(url_prefix) is not None:
            logger.info("org url prefix, %s is duplicated" % url_prefix)
            return None
        else:
            return url_prefix
    try:
        max_trial = int(max_trial)
    except (TypeError, ValueError):
        # Missing / non-numeric argument: fall back to a single attempt.
        max_trial = 0
    while max_trial >= 0:
        ret = _gen_prefix()
        if ret is not None:
            return ret
        else:
            max_trial -= 1
            logger.warning("Failed to generate org url prefix, retry: %d" % max_trial)
    return None
def org_register(request):
    """Allow a new user to register an organization account. A new
    organization will be created associate with that user.

    Arguments:
    - `request`:
    """
    login_bg_image_path = get_login_bg_image_path()
    if request.method == 'POST':
        form = OrgRegistrationForm(request.POST)
        if ORG_AUTO_URL_PREFIX:
            # generate url prefix automatically
            url_prefix = gen_org_url_prefix(3)
            if url_prefix is None:
                messages.error(request, "Failed to create organization account, please try again later.")
                return render(request, 'organizations/org_register.html', {
                    'form': form,
                    'login_bg_image_path': login_bg_image_path,
                    'org_auto_url_prefix': ORG_AUTO_URL_PREFIX,
                })
            post_data = request.POST.copy()
            post_data['url_prefix'] = url_prefix
            form = OrgRegistrationForm(post_data)
        if form.is_valid():
            name = form.cleaned_data['name']
            email = form.cleaned_data['email']
            # NOTE(review): field name restored from an anonymized placeholder;
            # confirm it matches the password field on OrgRegistrationForm.
            password = form.cleaned_data['password1']
            org_name = form.cleaned_data['org_name']
            url_prefix = form.cleaned_data['url_prefix']
            new_user = User.objects.create_user(email, password,
                                                is_staff=False, is_active=True)
            create_org(org_name, url_prefix, new_user.username)
            new_org = get_org_by_url_prefix(url_prefix)
            org_created.send(sender=None, org=new_org)
            if name:
                Profile.objects.add_or_update(new_user.username, name)
            # login the user
            new_user.backend = settings.AUTHENTICATION_BACKENDS[0]
            login(request, new_user)
            return HttpResponseRedirect(reverse('libraries'))
    else:
        form = OrgRegistrationForm()
    service_url = get_service_url()
    up = urlparse(service_url)
    service_url_scheme = up.scheme
    service_url_remaining = up.netloc + up.path
    return render(request, 'organizations/org_register.html', {
        'form': form,
        'login_bg_image_path': login_bg_image_path,
        'service_url_scheme': service_url_scheme,
        'service_url_remaining': service_url_remaining,
        'org_auto_url_prefix': ORG_AUTO_URL_PREFIX,
    })
@login_required
@org_staff_required
def react_fake_view(request, **kwargs):
    """Serve the React org-admin page; actual routing happens client side."""
    group_id = kwargs.get('group_id', '')
    org = request.user.org
    # Invitation link is only exposed when admin-driven invites are enabled.
    invitation_link = get_or_create_invitation_link(org.org_id) if ORG_ENABLE_ADMIN_INVITE_USER else ''
    # Whether use new page
    return render(request, "organizations/org_admin_react.html", {
        'org': org,
        'org_member_quota_enabled': ORG_MEMBER_QUOTA_ENABLED,
        'group_id': group_id,
        'invitation_link': invitation_link,
    })
@login_required
def org_associate(request, token):
    """Associate user with coresponding org.
    Mainly used for new WeChat user on doc.seafile.com.
    """
    username = request.user.username
    # validate token (cache miss yields -1, which fails the check below)
    org_id = cache.get('org_associate_%s' % token, -1)
    if org_id <= 0:
        return render_error(request, _('Invalid token.'))
    # get org info
    org = ccnet_api.get_org_by_id(org_id)
    if not org:
        return render_error(request, 'Invalid org id')
    # Log user in if he/she already belongs to any orgs.
    orgs = ccnet_api.get_orgs_by_user(username)
    if orgs:
        return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
    # check org member quota
    if ORG_MEMBER_QUOTA_ENABLED:
        # Imported lazily so the model is only loaded when quotas are on.
        from seahub.organizations.models import OrgMemberQuota
        org_members = len(ccnet_api.get_org_users_by_url_prefix(org.url_prefix,
                                                                -1, -1))
        org_members_quota = OrgMemberQuota.objects.get_quota(org_id)
        if org_members_quota is not None and org_members >= org_members_quota:
            return render_error(request, 'Above quota')
    set_org_user(org_id, username)
    return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
| 1.257813 | 1 |
test_spmm.py | binfoo1993/torchdgl | 0 | 79108 | <reponame>binfoo1993/torchdgl
import argparse, time, math
import numpy as np
import networkx as nx
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn
from dgl.data import register_data_args
from dgl.data import CoraGraphDataset, CiteseerGraphDataset, PubmedGraphDataset
from dgl.data import RedditDataset
from torch_sparse import SparseTensor
from torch_geometric.nn import MessagePassing
from torch_sparse import matmul
from typing import Optional
th.classes.load_library("build/libadjmatrix.so")
AdjMatrix = th.classes.DGL.AdjMatrix
def do_spmm(adj: AdjMatrix,
            op: str,
            reduce: str,
            ufeat : Optional[th.Tensor],
            efeat : Optional[th.Tensor]):
    """Dispatch a generalized SpMM to the custom DGL TorchScript op.

    Kept as a free function so it can be compiled with ``torch.jit.script``
    (see ``scripted_spmm`` below).
    """
    return th.ops.DGL.GSpMM(adj, op, reduce, ufeat, efeat)
scripted_spmm = th.jit.script(do_spmm)
class GCNConv_pyg(MessagePassing):
    """Minimal PyG layer: sum-aggregated pass-through of neighbor features."""
    def __init__(self):
        super(GCNConv_pyg, self).__init__(aggr="add")
    def forward(self, x, edge_index):
        out = self.propagate(edge_index, x=x)
        return out
    def message(self, x_j):
        # Identity message: neighbor features are forwarded unchanged.
        return x_j
    def message_and_aggregate(self, adj_t, x):
        # Fused SpMM path taken when edge_index is a SparseTensor.
        return matmul(adj_t, x, reduce=self.aggr)
pyg_spmm = GCNConv_pyg()
def run_dgl(g, features):
    """One SpMM via DGL's message passing (copy-src + sum reduce)."""
    g.ndata["h"] = features
    g.update_all(fn.copy_src(src="h", out="m"), fn.sum(msg="m", out="h"))
    return g.ndata['h']
def run_pyg(edge_index, features):
    """One SpMM via the PyG MessagePassing layer."""
    return pyg_spmm(features, edge_index)
def run_script(adj, features):
    """One SpMM via the TorchScript-compiled custom DGL op."""
    return scripted_spmm(adj, "copy_lhs", "sum", features, None)
def main(args):
    """Load a dataset and time one SpMM implementation (dgl / pyg / script).

    ``args`` needs: ``dataset`` (cora/citeseer/pubmed/reddit), ``impl``
    (dgl/pyg/anything-else=script) and ``gpu`` (<0 means CPU).
    """
    # load and preprocess dataset
    if args.dataset == 'cora':
        data = CoraGraphDataset()
    elif args.dataset == 'citeseer':
        data = CiteseerGraphDataset()
    elif args.dataset == 'pubmed':
        data = PubmedGraphDataset()
    elif args.dataset == 'reddit':
        data = RedditDataset()
    else:
        raise ValueError('Unknown dataset: {}'.format(args.dataset))
    g = data[0]
    if args.gpu < 0:
        cuda = False
    else:
        cuda = True
        g = g.to(args.gpu)
    features = g.ndata['feat']
    in_feats = features.shape[1]
    n_classes = data.num_classes
    print("feature size: {}".format(in_feats))
    # add self loop
    g = dgl.remove_self_loop(g)
    g = dgl.add_self_loop(g)
    n_edges = g.number_of_edges()
    src, dst = g.edges()
    # Same adjacency in the two alternative formats used by pyg / script.
    edge_index = SparseTensor(row=src,
                              col=dst,
                              sparse_sizes=(g.number_of_nodes(), g.number_of_nodes()))
    adj = AdjMatrix(src, dst)
    runtime = 0.0
    n = 1
    if args.impl == "dgl":
        # Untimed warm-up call, then timed runs inside an NVTX range.
        run_dgl(g, features)
        if args.gpu >= 0:
            th.cuda.synchronize()
        th.cuda.nvtx.range_push("spmm start")
        for _ in range(n):
            start_run = time.perf_counter()
            run_dgl(g, features)
            if args.gpu >= 0:
                th.cuda.synchronize()
            runtime += time.perf_counter() - start_run
        th.cuda.nvtx.range_pop()
    elif args.impl == "pyg":
        # Untimed warm-up call, then timed runs.
        run_pyg(edge_index, features)
        if args.gpu >= 0:
            th.cuda.synchronize()
        for _ in range(n):
            start_run = time.perf_counter()
            run_pyg(edge_index, features)
            if args.gpu >= 0:
                th.cuda.synchronize()
            runtime += time.perf_counter() - start_run
    else:
        # Untimed warm-up call, then timed runs of the scripted op.
        run_script(adj, features)
        if args.gpu >= 0:
            th.cuda.synchronize()
        for _ in range(n):
            start_run = time.perf_counter()
            run_script(adj, features)
            if args.gpu >= 0:
                th.cuda.synchronize()
            runtime += time.perf_counter() - start_run
    #print('Time (ms): {:.3f}'.format(runtime*1e3/n))
# CLI entry point: dataset flags come from register_data_args.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='GCN')
    register_data_args(parser)
    parser.add_argument("--impl", type=str, default="dgl",
                        help="use torch script or not")
    parser.add_argument("--gpu", type=int, default=-1,
                        help="gpu")
    args = parser.parse_args()
    print(args)
    main(args)
| 1.695313 | 2 |
cifra/tests/test_dictionaries.py | dante-signal31/cifra | 0 | 79236 | """
Tests for attack.dictionaries module.
"""
import math
import os
import dataclasses
import pytest
import tempfile
from typing import List
from test_common.fs.ops import copy_files
from test_common.fs.temp import temp_dir
from cifra.attack.dictionaries import Dictionary, get_words_from_text, \
NotExistingLanguage, get_words_from_text_file, identify_language, \
IdentifiedLanguage, get_word_pattern, get_histogram_from_text_file
MICRO_DICTIONARIES = {
"english": ["yes", "no", "dog", "cat", "snake"],
"spanish": ["si", "no", "perro", "gato"],
"french": ["qui", "non", "chien", "chat"],
"german": ["ja", "nein", "hund", "katze"]
}
TEXT_FILE_NAME = "text_to_load.txt"
ENGLISH_TEXT_WITHOUT_PUNCTUATIONS_MARKS = """This eBook is for the use of anyone anywhere at no cost and with
almost no restrictions whatsoever You may copy it give it away or
re use it under the terms of the Project Gutenberg License included
with this eBook or online at"""
ENGLISH_TEXT_WITH_PUNCTUATIONS_MARKS = """This eBook\n is for the use of anyone anywhere at no cost and with
almost no restrictions whatsoever.You may copy it, give it\r\n away or
re-use it under the terms of the Project Gutenberg License included
with this eBook or online at 2020"""
SPANISH_TEXT_WITHOUT_PUNCTUATIONS_MARKS = """Todavía lo recuerdo como si aquello hubiera sucedido ayer llegó á las
puertas de la posada estudiando su aspecto afanosa y atentamente
seguido por su maleta que alguien conducía tras él en una carretilla de
mano Era un hombre alto fuerte pesado con un moreno pronunciado
color de avellana Su trenza ó coleta alquitranada le caía sobre los
hombros de su nada limpia blusa marina Sus manos callosas destrozadas
y llenas de cicatrices enseñaban las extremidades de unas uñas rotas y
negruzcas Y su rostro moreno llevaba en una mejilla aquella gran
cicatriz de sable sucia y de un color blanquizco lívido y repugnante
Todavía lo recuerdo paseando su mirada investigadora en torno del
cobertizo silbando mientras examinaba y prorrumpiendo en seguida en
aquella antigua canción marina que tan á menudo le oí cantar después"""
SPANISH_TEXT_WITH_PUNCTUATIONS_MARKS = """Todavía lo recuerdo como si aquello hubiera sucedido ayer: llegó á las
puertas de la posada estudiando su aspecto, afanosa y atentamente,
seguido por su maleta que alguien conducía tras él en una carretilla de
mano. Era un hombre alto, fuerte, pesado, con un moreno pronunciado,
color de avellana. Su trenza ó coleta alquitranada le caía sobre los
hombros de su nada limpia blusa marina. Sus manos callosas, destrozadas
y llenas de cicatrices enseñaban las extremidades de unas uñas rotas y
negruzcas. Y su rostro moreno llevaba en una mejilla aquella gran
cicatriz de sable, sucia y de un color blanquizco, lívido y repugnante.
Todavía lo recuerdo, paseando su mirada investigadora en torno del
cobertizo, silbando mientras examinaba y prorrumpiendo, en seguida, en
aquella antigua canción marina que tan á menudo le oí cantar después:"""
FRENCH_TEXT_WITHOUT_PUNCTUATIONS_MARKS = """Combien le lecteur tandis que commodément assis au coin de son feu
il s amuse à feuilleter les pages d un roman combien il se rend peu
compte des fatigues et des angoisses de l auteur Combien il néglige de
se représenter les longues nuits de luttes contre des phrases rétives
les séances de recherches dans les bibliothèques les correspondances
avec d érudits et illisibles professeurs allemands en un mot tout
l énorme échafaudage que l auteur a édifié et puis démoli simplement
pour lui procurer à lui lecteur quelques instants de distraction au
coin de son feu ou encore pour lui tempérer l ennui d une heure en
wagon"""
FRENCH_TEXT_WITH_PUNCTUATIONS_MARKS = """Combien le lecteur,--tandis que, commodément assis au coin de son feu,
il s'amuse à feuilleter les pages d'un roman,--combien il se rend peu
compte des fatigues et des angoisses de l'auteur! Combien il néglige de
se représenter les longues nuits de luttes contre des phrases rétives,
les séances de recherches dans les bibliothèques, les correspondances
avec d'érudits et illisibles professeurs allemands, en un mot tout
l'énorme échafaudage que l'auteur a édifié et puis démoli, simplement
pour lui procurer, à lui, lecteur, quelques instants de distraction au
coin de son feu, ou encore pour lui tempérer l'ennui d'une heure en
wagon!"""
GERMAN_TEXT_WITHOUT_PUNCTUATIONS_MARKS = """Da unser Gutsherr Mr Trelawney Dr Livesay und die übrigen Herren
mich baten alle Einzelheiten über die Schatzinsel von Anfang bis zu
Ende aufzuschreiben und nichts auszulassen als die Lage der Insel und
auch die nur weil noch ungehobene Schätze dort liegen nehme ich im
Jahre die Feder zur Hand und beginne bei der Zeit als mein Vater
noch den Gasthof <NAME> hielt und jener dunkle alte
Seemann mit dem Säbelhieb über der Wange unter unserem Dache Wohnung
nahm"""
GERMAN_TEXT_WITH_PUNCTUATIONS_MARKS = """Da unser Gutsherr, Mr. Trelawney, Dr. Livesay und die übrigen Herren
mich baten, alle Einzelheiten über die Schatzinsel von Anfang bis zu
Ende aufzuschreiben und nichts auszulassen als die Lage der Insel, und
auch die nur, weil noch ungehobene Schätze dort liegen, nehme ich im
Jahre 17.. die Feder zur Hand und beginne bei der Zeit, als mein Vater
noch den Gasthof „<NAME>“ hielt und jener dunkle, alte
Seemann mit dem Säbelhieb über der Wange unter unserem Dache Wohnung
nahm."""
LANGUAGES = ["english", "spanish", "french", "german"]
@dataclasses.dataclass
class LoadedDictionaries:
    """Class with info to use a temporary dictionaries database."""
    # Path of the temporary directory hosting the database.
    temp_dir: str
    # Languages whose dictionaries were populated.
    languages: List[str]
@pytest.fixture(scope="session")
def loaded_dictionaries() -> LoadedDictionaries:
    """Create a dictionaries database at a temp dir filled with four languages.
    Languages in database are: english, spanish, french and german.
    :return: Yields a LoadedDictionary fill info of temporal dictionaries database.
    """
    # Session scope: populating four full dictionaries is expensive.
    with tempfile.TemporaryDirectory() as temp_dir:
        resources_path = os.path.join(temp_dir, "resources")
        os.mkdir(resources_path)
        copy_files([f"cifra/tests/resources/{language}_book.txt" for language in LANGUAGES], resources_path)
        for language in LANGUAGES:
            with Dictionary.open(language=language, create=True, _database_path=temp_dir) as dictionary:
                language_book = os.path.join(temp_dir, f"resources/{language}_book.txt")
                dictionary.populate(language_book)
        yield LoadedDictionaries(temp_dir=temp_dir, languages=LANGUAGES)
@pytest.fixture()
def loaded_dictionary_temp_dir(tmp_path):
    """Create a dictionary at a temp dir filled with only a handful of words.
    :return: Yields created temp_dir to host temporal dictionary database.
    """
    # Load test data.
    for language, words in MICRO_DICTIONARIES.items():
        with Dictionary.open(language, create=True, _database_path=tmp_path) as language_dictionary:
            _ = [language_dictionary.add_word(word) for word in words]
    # Check all words are stored at database:
    for language, words in MICRO_DICTIONARIES.items():
        with Dictionary.open(language, _database_path=tmp_path) as language_dictionary:
            assert all(language_dictionary.word_exists(word) for word in words)
    yield tmp_path
@pytest.fixture(params=[(ENGLISH_TEXT_WITH_PUNCTUATIONS_MARKS, ENGLISH_TEXT_WITHOUT_PUNCTUATIONS_MARKS, "english"),
                        (SPANISH_TEXT_WITH_PUNCTUATIONS_MARKS, SPANISH_TEXT_WITHOUT_PUNCTUATIONS_MARKS, "spanish"),
                        (FRENCH_TEXT_WITH_PUNCTUATIONS_MARKS, FRENCH_TEXT_WITHOUT_PUNCTUATIONS_MARKS, "french"),
                        (GERMAN_TEXT_WITH_PUNCTUATIONS_MARKS, GERMAN_TEXT_WITHOUT_PUNCTUATIONS_MARKS, "german")],
                ids=["english", "spanish", "french", "german"])
def temporary_text_file(temp_dir, request):
    """Write the parametrized raw text to a temp file.

    Yields (open file, clean text, language, temp_dir).
    """
    temporary_text_file_pathname = os.path.join(temp_dir, TEXT_FILE_NAME)
    with open(temporary_text_file_pathname, "w") as text_file:
        text_file.write(request.param[0])
        text_file.flush()
        yield text_file, request.param[1], request.param[2], temp_dir
@pytest.mark.quick_test
def test_open_not_existing_dictionary(temp_dir):
    """Opening a missing language without create=True must raise."""
    with pytest.raises(NotExistingLanguage):
        with Dictionary.open("english", _database_path=temp_dir) as _:
            pass
@pytest.mark.quick_test
def test_open_existing_dictionary(temp_dir):
    """A language created once can be reopened afterwards."""
    # Create not existing language.
    with Dictionary.open("english", create=True, _database_path=temp_dir) as _:
        pass
    # Open newly created language
    with Dictionary.open("english", _database_path=temp_dir) as english_dictionary:
        assert english_dictionary._already_created()
@pytest.mark.quick_test
def test_cwd_word(temp_dir):
    """Test if we can check for word existence, write a new word and finally delete it."""
    word = "test"
    with Dictionary.open("english", create=True, _database_path=temp_dir) as english_dictionary:
        assert not english_dictionary.word_exists(word)
        english_dictionary.add_word(word)
        assert english_dictionary.word_exists(word)
        english_dictionary.remove_word(word)
        assert not english_dictionary.word_exists(word)
@pytest.mark.quick_test
def test_store_word_pattern(temp_dir):
    """Test word pattern is properly stored at database."""
    word = "classification"
    with Dictionary.open("test", create=True, _database_path=temp_dir) as test_dictionary:
        assert not test_dictionary.word_exists(word)
        test_dictionary.add_word(word)
        assert test_dictionary.word_exists(word)
        # Canonical pattern of "classification".
        words = test_dictionary.get_words_with_pattern("0.1.2.3.3.4.5.4.0.2.6.4.7.8")
        assert word in words
@pytest.mark.quick_test
def test_create_language(temp_dir):
    """Test a new language creation at database."""
    # Uses the private _open/_create/_close API directly on purpose.
    english_dictionary = Dictionary("english", database_path=temp_dir)
    english_dictionary._open()
    assert not english_dictionary._already_created()
    english_dictionary._create_dictionary()
    assert english_dictionary._already_created()
    english_dictionary._close()
@pytest.mark.quick_test
def test_delete_language(loaded_dictionary_temp_dir):
    """Test delete a language also removes its words."""
    language_to_remove = "german"
    Dictionary.remove_dictionary(language_to_remove, _database_path=loaded_dictionary_temp_dir)
    # Check all words from removed language have been removed too.
    not_existing_dictionary = Dictionary(language_to_remove, loaded_dictionary_temp_dir)
    not_existing_dictionary._open()
    assert all(not not_existing_dictionary.word_exists(word, _testing=True)
               for word in MICRO_DICTIONARIES[language_to_remove])
    not_existing_dictionary._close()
@pytest.mark.quick_test
def test_get_words_from_text_file(temporary_text_file):
    """Words extracted from a file must equal the clean text's word set."""
    text_file = temporary_text_file[0].name
    text_without_punctuation_marks = temporary_text_file[1]
    expected_set = set(text_without_punctuation_marks.lower().split())
    returned_set = get_words_from_text_file(text_file)
    assert expected_set == returned_set
@pytest.mark.quick_test
def test_populate_words_from_text_files(temporary_text_file):
    """populate() must store every word from the source text file."""
    text_file = temporary_text_file[0].name
    text_without_punctuation_marks = temporary_text_file[1]
    current_language = temporary_text_file[2]
    temp_dir = temporary_text_file[3]
    expected_set = set(text_without_punctuation_marks.lower().split())
    with Dictionary.open(current_language, create=True, _database_path=temp_dir) as current_dictionary:
        current_dictionary.populate(text_file)
    with Dictionary.open(current_language, _database_path=temp_dir) as current_dictionary:
        for word in expected_set:
            assert current_dictionary.word_exists(word)
@pytest.mark.quick_test
def test_populate_database_histogram_from_text_file(temp_dir):
    """Letter histogram persisted by populate() must match known counts."""
    text_file_pathname = "cifra/tests/resources/english_book.txt"
    with Dictionary.open("english", create=True, _database_path=temp_dir) as current_dictionary:
        current_dictionary.populate(text_file_pathname)
    with Dictionary.open("english", create=False, _database_path=temp_dir) as current_dictionary:
        # Reference counts for the bundled english book.
        assert current_dictionary.letter_histogram["e"] == 35127
        assert current_dictionary.letter_histogram["t"] == 26406
        assert current_dictionary.letter_histogram["a"] == 24684
        assert current_dictionary.letter_histogram["o"] == 22983
@pytest.mark.quick_test
@pytest.mark.parametrize("text_with_punctuation_marks,text_without_punctuation_marks",
                         [(ENGLISH_TEXT_WITH_PUNCTUATIONS_MARKS, ENGLISH_TEXT_WITHOUT_PUNCTUATIONS_MARKS),
                          (SPANISH_TEXT_WITH_PUNCTUATIONS_MARKS, SPANISH_TEXT_WITHOUT_PUNCTUATIONS_MARKS),
                          (FRENCH_TEXT_WITH_PUNCTUATIONS_MARKS, FRENCH_TEXT_WITHOUT_PUNCTUATIONS_MARKS),
                          (GERMAN_TEXT_WITH_PUNCTUATIONS_MARKS, GERMAN_TEXT_WITHOUT_PUNCTUATIONS_MARKS)],
                         ids=["english", "spanish", "french", "german"])
def test_get_words_from_text(text_with_punctuation_marks: str, text_without_punctuation_marks: str):
    """Punctuation stripping must yield exactly the clean text's word set."""
    expected_set = set(text_without_punctuation_marks.lower().split())
    returned_set = get_words_from_text(text_with_punctuation_marks)
    assert expected_set == returned_set
@pytest.mark.slow_test
def test_get_dictionaries_names(loaded_dictionaries: LoadedDictionaries):
    """Available languages must match those loaded by the session fixture."""
    dictionaries_names = Dictionary.get_available_languages(_database_path=loaded_dictionaries.temp_dir)
    assert dictionaries_names == loaded_dictionaries.languages
@pytest.mark.quick_test
def test_get_word_pattern():
    """Pattern assigns increasing indexes to first letter occurrences."""
    word = "HGHHU"
    expected_word_pattern = "0.1.0.0.2"
    word_pattern = get_word_pattern(word)
    assert word_pattern == expected_word_pattern
@pytest.mark.quick_test
def test_add_multiple_words(temp_dir):
    """add_multiple_words must store every given word in one call."""
    language = "english"
    with Dictionary.open(language, create=True, _database_path=temp_dir) as dictionary:
        assert all(not dictionary.word_exists(word) for word in MICRO_DICTIONARIES[language])
        dictionary.add_multiple_words(MICRO_DICTIONARIES[language])
        assert all(dictionary.word_exists(word) for word in MICRO_DICTIONARIES[language])
@pytest.mark.slow_test
@pytest.mark.parametrize("text,language",
                         [(ENGLISH_TEXT_WITH_PUNCTUATIONS_MARKS, "english"),
                          (SPANISH_TEXT_WITH_PUNCTUATIONS_MARKS, "spanish")],
                         ids=["english",
                              "spanish"])
def test_identify_language(loaded_dictionaries: LoadedDictionaries, text: str, language: str):
    """Identification must pick the right language with full certainty."""
    identified_language = identify_language(text, loaded_dictionaries.temp_dir)
    assert identified_language.winner == language
    assert identified_language.winner_probability == 1.0
@pytest.mark.quick_test
def test_get_letter_histogram_from_text_file():
    """Histogram computed from the english book must match known counts."""
    language_histogram = get_histogram_from_text_file("cifra/tests/resources/english_book.txt")
    assert language_histogram["e"] == 35127
    assert language_histogram["t"] == 26406
    assert language_histogram["a"] == 24684
    assert language_histogram["o"] == 22983
@pytest.mark.quick_test
def test_get_all_words(loaded_dictionary_temp_dir):
    """get_all_words must return exactly the micro english dictionary."""
    expected_words = ["yes", "no", "dog", "cat", "snake"]
    with Dictionary.open("english", False, _database_path=loaded_dictionary_temp_dir) as dictionary:
        returned_words = dictionary.get_all_words()
        # Order is irrelevant; compare as sets. (A stray " | 1.679688 | 2"
        # fragment fused onto this line previously broke the assertion.)
        assert set(returned_words) == set(expected_words)
src/bxcommon/feed/filter_parsing.py | thabaptiser/bxcommon | 0 | 79364 | <filename>src/bxcommon/feed/filter_parsing.py
from typing import Callable, Dict
import pycond as pc
from bxutils import logging
logger = logging.get_logger(__name__)
pc.ops_use_symbolic_and_txt(allow_single_eq=True)
def get_validator(filter_string: str) -> Callable[[Dict], bool]:
    """Compile ``filter_string`` into a pycond predicate over a dict.

    The expression is lowercased before parsing; ``()`` delimit grouping.
    """
    logger.trace("Getting validator for filters {}", filter_string)
    res = pc.qualify(filter_string.lower(), brkts="()", add_cached=True)
    return res
| 1.382813 | 1 |
app.py | skrzypak/soaf | 0 | 79492 | <filename>app.py
import glob
import shutil
import subprocess
import os
import sys
import argparse
# Read and save metadata from file
# Read and save metadata from file
def exiftool_metadata(path):
    """Run exifTool.exe on ``path`` and return its output as a dict.

    Each output line has the form ``Key : Value``; keys and values are
    stripped of surrounding whitespace.
    """
    metadata = {}
    exifToolPath = 'exifTool.exe'
    ''' use Exif tool to get the metadata '''
    process = subprocess.Popen(
        [
            exifToolPath,
            path
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True
    )
    try:
        ''' get the tags in dict '''
        for tag in process.stdout:
            tag = tag.strip()
            key = tag[:tag.find(':')].strip()
            value = tag[tag.find(':') + 1:].strip()
            metadata[key] = value
    finally:
        # Close the pipe and reap the child so repeated calls do not leak
        # file descriptors or accumulate zombie processes.
        process.stdout.close()
        process.wait()
    return metadata
class File:
    """Exiftool metadata of a single file, plus the logic to copy it into a
    ``<dst>/<mime-major>/<year>/<month>`` layout."""

    def __init__(self, path):
        # Metadata is read once, eagerly, via exiftool.
        self.metadata = exiftool_metadata(path)

    def _get_file_metadata(self, key, no=''):
        """Return metadata value for ``key`` or the fallback ``no``."""
        if key in self.metadata:
            return self.metadata[key]
        else:
            return no

    def copyCore(self, source, dst_dir: str, copy_duplicate=False):
        """Copy ``source`` into the layout under ``dst_dir``.

        Returns a list of human-readable log lines. When the destination
        already holds a same-named file, a ``_D<n>`` suffix is generated;
        same-size duplicates are skipped unless ``copy_duplicate`` is set.
        """
        logs = []
        # Folder name used when a piece of metadata is missing.
        no_metadata = 'none'
        date = File._get_file_metadata(self, 'Date/Time Original')
        if date == '':
            date = File._get_file_metadata(self, 'Create Date', no_metadata)
        mime_type = File._get_file_metadata(self, 'MIME Type', no_metadata)
        # e.g. <dst>/image/2020/07
        dst_dir += f'''/{mime_type[:mime_type.find('/')]}/{date[:4]}/{date[5:7]}'''
        filename = File._get_file_metadata(self, 'File Name')
        f_name = filename
        dst = dst_dir + '/' + filename
        # File with the same name exists in dst. If source and dst have same size then determines 'copy_exists'
        if os.path.isfile(dst):
            i = 0
            f_pth = File(dst)
            if_same_size: bool = f_pth._get_file_metadata("File Size") == File._get_file_metadata(self, 'File Size')
            if (not if_same_size) or copy_duplicate:
                while os.path.isfile(dst):
                    filename = f'''{f_name[:f_name.find('.')]}_D{str(i)}.{File._get_file_metadata(self, 'File Type Extension')}'''
                    # BUG FIX: the de-duplicated name was previously discarded
                    # and everything was written to a fixed '(unknown)' path.
                    dst = f'''{dst_dir}/{filename}'''
                    i = i + 1
                if if_same_size:
                    logs.append(f"Warning: file already exists but I must copy all files"
                                f" [copy_duplicate={copy_duplicate}], so I try do it ...")
                else:
                    logs.append(f"Warning: file already exists but have other size, so I try copy it ...")
            else:
                logs.append(f"Warning: file already duplicate [copy_exists={copy_duplicate}]."
                            f"\nCopy aboard: {source} -> {dst}")
                return logs
        try:
            if not os.path.isdir(dst_dir):
                os.makedirs(dst_dir)
                logs.append(f"New directory created: {dst_dir}")
            shutil.copy(source, dst)
            logs.append(f'''Copy done: {source} -> {dst}''')
        except Exception as e:
            logs.append(f'''Copy error [{e}]: {source} -> {dst}''')
        return logs
def main():
    """Parse CLI args and copy every file under the source tree into a
    mime-type/year/month layout beneath the destination directory."""
    # Arguments from console
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', help="Obligatory: source directory path")
    parser.add_argument('-d', help="Obligatory: destination folder path")
    parser.add_argument('-e', help="Obligatory: copy duplicate files (T/True/F/False)")
    args = parser.parse_args(sys.argv[1:])
    # Setup variable
    source_dir = args.s
    dst_dir = args.d
    df = {
        "T": True,
        "TRUE": True,
        "F": False,
        "FALSE": False
    }
    try:
        copy_duplicate = df.get(args.e.upper(), False)
    except AttributeError:
        # -e omitted: args.e is None, so .upper() raises AttributeError.
        copy_duplicate = False
        print(f"app.py: error: unrecognized arguments. Use -h or --help to see options")
        exit(1)
    # Number of log
    l_lpm = 0
    # source_dir = 'C:/Users'
    # dst_dir = 'C:/Users'
    # copy_duplicate = False
    for f_inx, source in enumerate(glob.glob(source_dir + '/**/*.*', recursive=True)):
        try:
            f = File(source)
            print("----------")
            for log in f.copyCore(source, dst_dir, copy_duplicate):
                l_lpm = l_lpm + 1
                print(f'''{str(l_lpm)}.{f_inx + 1}) {log}''')
        except Exception as e:
            # Keep going: a single unreadable file must not abort the run.
            print(f'Copy error [{e}]: {source}')
# Script entry point.
if __name__ == '__main__':
    main()
| 1.773438 | 2 |
hwtLib/tests/types/union_test.py | optical-o/hwtLib | 24 | 79620 | <filename>hwtLib/tests/types/union_test.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.struct import HStruct
from hwt.hdl.types.union import HUnion
from hwtLib.types.ctypes import uint8_t, uint16_t, int8_t, uint32_t
from pyMathBitPrecise.bit_utils import mask
class UnionTC(unittest.TestCase):
    """Unit tests for HUnion: construction constraints, value access through
    overlapping members, and type equality."""

    def test_assertMembersSameSize(self):
        """All union members must have the same bit width."""
        t = HUnion(
            (uint8_t, "a"),
            (uint8_t, "b"),
            (uint8_t, "c"),
            (uint8_t, "d"),
        )
        self.assertEqual(t.bit_length(), 8)

        with self.assertRaises(TypeError):
            HUnion(
                (uint16_t, "a"),
                (uint8_t, "b"),
            )

    def test_assertNoPadding(self):
        """Anonymous (padding) members are not allowed in a union."""
        with self.assertRaises(AssertionError):
            HUnion(
                (uint8_t, None),
                (uint8_t, "b"),
            )

    def test_value_simple(self):
        """A write through one member must be visible through the other."""
        t = HUnion(
            (uint8_t, "unsigned"),
            (int8_t, "signed"),
        )
        v = t.from_py(None)
        v.unsigned = mask(8)
        # 0xFF reinterpreted as a signed byte is -1
        self.assertEqual(int(v.signed), -1)
        v.signed = 0
        self.assertEqual(int(v.unsigned), 0)

    def test_value_struct_and_bits(self):
        """Struct fields and the raw-bits member alias the same storage."""
        t = HUnion(
            (uint16_t, "bits"),
            (HStruct(
                (uint8_t, "lower"),
                (uint8_t, "upper"),
            ), "struct"),
        )
        v = t.from_py(None)

        v.struct.upper = 1
        self.assertEqual(v.bits.val, 1 << 8)
        self.assertEqual(v.bits.vld_mask, mask(8) << 8)

        v.struct.lower = 1
        self.assertEqual(v.bits.val, (1 << 8) | 1)
        self.assertEqual(v.bits.vld_mask, mask(16))

        v.bits = 2
        self.assertEqual(int(v.struct.lower), 2)
        self.assertEqual(int(v.struct.upper), 0)

    def test_value_array_and_bits(self):
        """Array items map onto consecutive byte lanes of the bits member."""
        t = HUnion(
            (uint32_t, "bits"),
            (uint8_t[4], "arr"),
        )
        v = t.from_py(None)
        b = (4 << (3 * 8)) | (3 << (2 * 8)) | (2 << 8) | 1
        v.bits = b
        for i, item in enumerate(v.arr):
            self.assertEqual(int(item), i + 1)
        self.assertEqual(int(v.bits), b)

    def test_value_array_toArray(self):
        """16b array items are visible as pairs of 8b items of the other member."""
        t = HUnion(
            (uint16_t[2], "arr16b"),
            (int8_t[4], "arr8b"),
        )
        v = t.from_py(None)
        for i in range(len(v.arr16b)):
            v.arr16b[i] = i + 1

        # Bug fix: the loop used to rebind the outer name `v` (shadowing the
        # union value under test); use a dedicated `expected` variable so `v`
        # stays valid for the whole method.
        for i, item in enumerate(v.arr8b):
            if (i + 1) % 2 == 0:
                # odd-indexed bytes are the upper half of a small 16b value -> 0
                expected = 0
            else:
                expected = i // 2 + 1
            self.assertEqual(int(item), expected)

    def test_value_array_of_struct_to_bits(self):
        """Array-of-struct member packs into the raw bits member LSB-first."""
        t = HUnion(
            (HStruct(
                (uint16_t, "a"),
                (uint8_t, "b"),
            )[3], "arr"),
            (Bits(24 * 3), "bits")
        )
        v = t.from_py(None)
        for i in range(len(v.arr)):
            v.arr[i] = {"a": i + 1,
                        "b": (i + 1) * 3
                        }

        self.assertEqual(int(v.bits),
                         1
                         | 3 << 16
                         | 2 << 24
                         | 6 << (24 + 16)
                         | 3 << (2 * 24)
                         | 9 << (2 * 24 + 16))

    def test_hunion_type_eq(self):
        """HUnion equality: member order is irrelevant; member names, widths
        and signedness are not."""
        t0 = HUnion(
            (HStruct(
                (uint16_t, "a"),
                (uint8_t, "b"),
            )[3], "arr"),
            (Bits(24 * 3), "bits")
        )
        t1 = HUnion(
            (HStruct(
                (uint16_t, "a"),
                (uint8_t, "b"),
            )[3], "arr"),
            (Bits(24 * 3), "bits")
        )
        self.assertEqual(t0, t1)
        self.assertEqual(t1, t0)

        # Same members in a different order -> still equal
        t1 = HUnion(
            (Bits(24 * 3), "bits"),
            (HStruct(
                (uint16_t, "a"),
                (uint8_t, "b"),
            )[3], "arr")
        )
        self.assertEqual(t0, t1)
        self.assertEqual(t1, t0)

        # Different member types -> not equal
        t1 = HUnion(
            (uint32_t, "bits"),
            (uint8_t[4], "arr"),
        )
        self.assertNotEqual(t0, t1)
        self.assertNotEqual(t1, t0)

        # Different member name -> not equal
        t1 = HUnion(
            (Bits(24 * 3), "bbits"),
            (HStruct(
                (uint16_t, "a"),
                (uint8_t, "b"),
            )[3], "arr")
        )
        self.assertNotEqual(t0, t1)
        self.assertNotEqual(t1, t0)

        # Comparison against a non-union type
        t1 = Bits(24 * 3)
        self.assertNotEqual(t0, t1)
        self.assertNotEqual(t1, t0)

        # Different signedness of the bits member -> not equal
        t1 = HUnion(
            (Bits(24 * 3, signed=False), "bits"),
            (HStruct(
                (uint16_t, "a"),
                (uint8_t, "b"),
            )[3], "arr")
        )
        self.assertNotEqual(t0, t1)
        self.assertNotEqual(t1, t0)
if __name__ == '__main__':
    # Run the suite directly; verbosity=3 prints each test name as it runs.
    suite = unittest.TestSuite()
    # suite.addTest(UnionTC('testValue'))
    suite.addTest(unittest.makeSuite(UnionTC))
    runner = unittest.TextTestRunner(verbosity=3)
    runner.run(suite)
| 1.601563 | 2 |
app/python_file/blurdetectiongui.py | albertjuntak04/ditenun-web-1 | 0 | 79748 | <reponame>albertjuntak04/ditenun-web-1
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 4.14
# In conjunction with Tcl version 8.6
# Jun 21, 2018 09:40:52 AM
import sys
from imutils import paths
import cv2
from PIL import Image as PilImage
from PIL import ImageTk
from tkinter import filedialog
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import blurdetectiongui_support
def vp_start_gui():
    '''Starting point when module is the main routine.'''
    # NOTE(review): `val` is declared global but never assigned here --
    # presumably used by blurdetectiongui_support; confirm.
    global val, w, root
    root = Tk()
    top = New_Toplevel (root)
    # Hand the root window and toplevel object to the PAGE support module.
    blurdetectiongui_support.init(root, top)
    root.mainloop()
w = None  # module-level handle to the window created by create_New_Toplevel()


def create_New_Toplevel(root, *args, **kwargs):
    '''Starting point when module is imported by another program.'''
    # Keep references in module globals so destroy_New_Toplevel() can find them.
    global w, w_win, rt
    rt = root
    w = Toplevel (root)
    top = New_Toplevel (w)
    blurdetectiongui_support.init(w, top, *args, **kwargs)
    return (w, top)
def destroy_New_Toplevel():
    '''Tear down the window created by create_New_Toplevel().'''
    global w
    w.destroy()
    w = None
class New_Toplevel:
    """Main window of the blur-detection demo GUI.

    Lets the user pick an image file, shows a 300x300 preview, and reports a
    Laplacian-variance sharpness score plus a coarse quality category.
    """

    def blurDetection(self):
        """Compute and display the blur metric for the selected image.

        Uses the variance of the Laplacian as a focus measure: few edges
        (low variance) means a blurry picture.  Thresholds 400 and 1500
        split the score into Blurry / Improve / Good categories.
        Assumes srcImage() ran first so self.pembuka holds a file path.
        """
        image = cv2.imread(self.pembuka)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        fm = cv2.Laplacian(gray, cv2.CV_64F).var()
        # filename = image.filename
        hasil = "Blurry"
        # if the focus measure is less than the supplied threshold,
        # then the image should be considered "blurry"
        if fm > 400:
            hasil = "Improve"
        if fm > 1500:
            hasil = "Good"
        self.Label4.configure(text='{:.2f}'.format(fm))
        self.Label2.configure(text=hasil)

    def srcImage(self):
        """Ask the user for an image file and display it, resized to 300x300."""
        tipeFile = (('image files', '*.jpg'), ('png files', '*.png'), ('all files', '*'))
        self.pembuka = filedialog.askopenfilename(filetypes=tipeFile)
        print(self.pembuka)
        self.image = cv2.imread(self.pembuka)
        # OpenCV loads BGR; convert to RGB so on-screen colours are correct.
        self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
        self.image = cv2.resize(self.image,(300,300))
        self.image = PilImage.fromarray(self.image)
        self.image = ImageTk.PhotoImage(self.image)
        self.Label1.configure(image=self.image)
        # Keep a reference on the label too, or Tk would garbage-collect the photo.
        self.Label1.image = self.image

    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.'''
        # Shared palette / fonts for all widgets below (PAGE-generated).
        _bgcolor = '#d9d9d9' # X11 color: 'gray85'
        _fgcolor = '#000000' # X11 color: 'black'
        _compcolor = '#d9d9d9' # X11 color: 'gray85'
        _ana1color = '#d9d9d9' # X11 color: 'gray85'
        _ana2color = '#d9d9d9' # X11 color: 'gray85'
        font10 = "-family {Segoe UI} -size 9 -weight bold -slant roman" \
            " -underline 0 -overstrike 0"
        font9 = "-family {Segoe UI} -size 14 -weight bold -slant roman" \
            " -underline 0 -overstrike 0"

        top.geometry("600x450+317+152")
        top.title("New Toplevel")
        top.configure(background="#d9d9d9")
        top.configure(highlightbackground="#d9d9d9")
        top.configure(highlightcolor="black")

        # "Select Image" button -> srcImage()
        self.Button1 = Button(top)
        self.Button1.place(relx=0.53, rely=0.71, height=34, width=97)
        self.Button1.configure(activebackground="#d9d9d9")
        self.Button1.configure(activeforeground="#000000")
        self.Button1.configure(background="#d9d9d9")
        self.Button1.configure(disabledforeground="#a3a3a3")
        self.Button1.configure(foreground="#000000")
        self.Button1.configure(highlightbackground="#d9d9d9")
        self.Button1.configure(highlightcolor="black")
        self.Button1.configure(pady="0")
        self.Button1.configure(text='''Select Image''')
        self.Button1.configure(command=self.srcImage)

        # "Check Blur" button -> blurDetection()
        self.Button2 = Button(top)
        self.Button2.place(relx=0.72, rely=0.71, height=34, width=97)
        self.Button2.configure(activebackground="#d9d9d9")
        self.Button2.configure(activeforeground="#000000")
        self.Button2.configure(background="#d9d9d9")
        self.Button2.configure(disabledforeground="#a3a3a3")
        self.Button2.configure(foreground="#000000")
        self.Button2.configure(highlightbackground="#d9d9d9")
        self.Button2.configure(highlightcolor="black")
        self.Button2.configure(pady="0")
        self.Button2.configure(text='''Check Blur''')
        self.Button2.configure(command=self.blurDetection)

        # Label1: image preview area (populated by srcImage)
        self.Label1 = Label(top)
        self.Label1.place(relx=0.08, rely=0.22, height=251, width=234)
        self.Label1.configure(activebackground="#f9f9f9")
        self.Label1.configure(activeforeground="black")
        self.Label1.configure(background="#d9d9d9")
        self.Label1.configure(disabledforeground="#a3a3a3")
        self.Label1.configure(foreground="#000000")
        self.Label1.configure(highlightbackground="#d9d9d9")
        self.Label1.configure(highlightcolor="black")
        self.Label1.configure(text='''Ulos Image''')

        # Label2: quality category result (filled by blurDetection)
        self.Label2 = Label(top)
        self.Label2.place(relx=0.68, rely=0.39, height=21, width=74)
        self.Label2.configure(activebackground="#f9f9f9")
        self.Label2.configure(activeforeground="black")
        self.Label2.configure(background="#d9d9d9")
        self.Label2.configure(disabledforeground="#a3a3a3")
        self.Label2.configure(foreground="#000000")
        self.Label2.configure(highlightbackground="#d9d9d9")
        self.Label2.configure(highlightcolor="black")

        # Label3: window heading
        self.Label3 = Label(top)
        self.Label3.place(relx=0.25, rely=0.04, height=41, width=324)
        self.Label3.configure(activebackground="#f9f9f9")
        self.Label3.configure(activeforeground="black")
        self.Label3.configure(background="#d9d9d9")
        self.Label3.configure(disabledforeground="#a3a3a3")
        self.Label3.configure(font=font9)
        self.Label3.configure(foreground="#000000")
        self.Label3.configure(highlightbackground="#d9d9d9")
        self.Label3.configure(highlightcolor="black")
        self.Label3.configure(text='''Blur Detection Demo''')

        # Label4: numeric blur value (filled by blurDetection)
        self.Label4 = Label(top)
        self.Label4.place(relx=0.68, rely=0.27, height=31, width=74)
        self.Label4.configure(activebackground="#f9f9f9")
        self.Label4.configure(activeforeground="black")
        self.Label4.configure(background="#d9d9d9")
        self.Label4.configure(disabledforeground="#a3a3a3")
        self.Label4.configure(foreground="#000000")
        self.Label4.configure(highlightbackground="#d9d9d9")
        self.Label4.configure(highlightcolor="black")

        # Label5: static caption "Blur Value"
        self.Label5 = Label(top)
        self.Label5.place(relx=0.52, rely=0.28, height=21, width=64)
        self.Label5.configure(activebackground="#f9f9f9")
        self.Label5.configure(activeforeground="black")
        self.Label5.configure(background="#d9d9d9")
        self.Label5.configure(disabledforeground="#a3a3a3")
        self.Label5.configure(font=font10)
        self.Label5.configure(foreground="#000000")
        self.Label5.configure(highlightbackground="#d9d9d9")
        self.Label5.configure(highlightcolor="black")
        self.Label5.configure(text='''Blur Value''')

        # Label6: static caption "Category"
        self.Label6 = Label(top)
        self.Label6.place(relx=0.52, rely=0.38, height=21, width=54)
        self.Label6.configure(activebackground="#f9f9f9")
        self.Label6.configure(activeforeground="black")
        self.Label6.configure(background="#d9d9d9")
        self.Label6.configure(disabledforeground="#a3a3a3")
        self.Label6.configure(font=font10)
        self.Label6.configure(foreground="#000000")
        self.Label6.configure(highlightbackground="#d9d9d9")
        self.Label6.configure(highlightcolor="black")
        self.Label6.configure(text='''Category''')
if __name__ == '__main__':
    # Launch the GUI when this module is run as a script.
    vp_start_gui()
| 2.015625 | 2 |
common.py | przecze/deutchs_algorithm_in_qiskit_vs_cirq | 0 | 79876 | <gh_stars>0
def to_braket(array):
    """Pretty-print a 2-qubit state vector as a sum of basis kets.

    Zero amplitudes are skipped.  Amplitudes with a significant imaginary
    part are printed as the full complex number in parentheses, e.g.
    ``(0.0+0.5j)|10>``; effectively-real amplitudes are printed as just
    their real part, e.g. ``0.7|00>``.

    The basis order ``|00>, |10>, |01>, |11>`` reflects the qubit ordering
    used by the comparison in this project.
    """
    state = []
    basis = ('|00>', '|10>', '|01>', '|11>')
    for im, base_state in zip(array, basis):
        if im:
            # Bug fix: the original branches were swapped -- it dropped the
            # imaginary part exactly when it was significant, and printed the
            # full complex number when the amplitude was effectively real.
            if abs(im.imag) > 0.001:
                state.append(f'({im:.1f}){base_state}')
            else:
                state.append(f'{im.real:.1f}{base_state}')
    return " + ".join(state)
| 1.476563 | 1 |
tests/inputs/if-branching/28-join-to-top-mut.py | helq/pytropos | 4 | 80004 | <gh_stars>1-10
if _:  # `_` is deliberately undefined here: the analyzer must branch on an unknown truth value
    l = 2  # then-branch: `l` is an int
else:
    l = []  # else-branch: `l` is a list -- presumably the checker joins both types; confirm against the test harness
| 0.143555 | 0 |
quant/pointcloud.py | WhoIsJack/chemokine_buffering_paper | 1 | 80132 | # -*- coding: utf-8 -*-
"""
Created on Sun May 14 13:54:22 2017
@author: <NAME> @ Gilmour group @ EMBL Heidelberg
@descript: Functions for converting fluorescence intensity distributions
into a point cloud representation and then register them to
the image frame.
"""
#------------------------------------------------------------------------------
### Imports
# Standard external imports
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.ndimage as ndi
import sys, os
# Other external imports
from sklearn.decomposition import PCA
#------------------------------------------------------------------------------
### Function for landmark extraction
def generate_pcl(image, nHits, adjust_power=1, replace=False, seed=None):
    """Sample ``nHits`` pixel coordinates with probability proportional to
    pixel intensity, turning an intensity image into a point cloud.

    Parameters
    ----------
    image : ndarray
        Non-negative intensity distribution (any dimensionality).
    nHits : int
        Number of landmarks (samples) to draw.
    adjust_power : int
        NOTE(review): currently unused; kept for interface compatibility.
    replace : bool
        Whether to sample with replacement (default: without).
    seed : int or None
        Seed for numpy's global RNG; None leaves the RNG state untouched.

    Returns
    -------
    ndarray of shape (nHits, image.ndim)
        Sampled pixel coordinates, one row per hit.
    """
    # Seed random number generator.
    # Bug fix: `if seed:` silently ignored seed=0; test against None instead.
    if seed is not None:
        np.random.seed(seed)

    # Normalize to a total intensity of 1 so the image can be used as a
    # probability distribution.  (Bug fix: the np.float alias was removed in
    # NumPy 1.24; the builtin float is the equivalent float64 dtype.)
    normed = image.astype(float) / np.sum(image)

    # Draw flat pixel indices from the distribution (without replacement
    # by default)
    indx_arr = np.arange(normed.flatten().shape[0])
    hits_arr = np.random.choice(indx_arr,
                                size=nHits,
                                replace=replace,
                                p=normed.flatten())

    # Unravel flat index hits array into per-dimension coordinates
    hits = np.array(np.unravel_index(hits_arr, np.shape(normed))).T

    # Return result
    return hits
#------------------------------------------------------------------------------
### Main function
def point_cloud_pipeline(stack, ref_stack,
fpath, fname, res, num_LMs=500,
verbose=False, show=False):
"""Pipeline that extracts and aligns point clouds
from intensity distributions.
Parameters
----------
stack : 3D numpy image
Intensity distribution to convert to point cloud.
ref_stack : 3D numpy image
Intensity distribution reflecting overall tissue
shape (usually the membranes). Used for aligning
the cloud to the image frame and normalizing z.
fpath : string
Path of the source image file corresponding to the
input stack. Used to find matching metadata.
fname : list of strings
File name of the source image file corresponding
to the input stack. Used to find matching metadata.
res : list of floats
Pixel size in each dimension: [z, y, x].
Returns
-------
lms : numpy array of shape (num_LMs, 3)
Landmark coordinates in the image space (zyx).
lms_tf : numpy array of shape (num_LMs, 3)
Aligned and z-normalized landmark coordinates(zyx).
lum_dist_lms : numpy array of shape (num_LMs)
Euclidean distance of landmarks to the lumen.
"""
#--------------------------------------------------------------------------
### Run landmark assignment
# Run landmark assignment
lms = generate_pcl(stack, num_LMs, seed=42)
ref_lms = generate_pcl(ref_stack, num_LMs, seed=42)
# Change from pixels to um
lms = lms * np.array(res)
ref_lms = ref_lms * np.array(res)
# Plot results
if show:
plt.scatter(lms[:,2], lms[:,1], c=lms[:,0], cmap='viridis')
plt.title('Channel landmarks in image frame')
plt.show()
plt.scatter(ref_lms[:,2], ref_lms[:,1], c=ref_lms[:,0], cmap='viridis')
plt.title('Reference landmarks in image frame')
plt.show()
#--------------------------------------------------------------------------
### Cloud alignment via PCA
# Prep
pca = PCA()
# Fit PCA model to data
pca.fit(ref_lms)
# Ensure that the sign of PCs is consistent with the image frame
# Note: Given that the images are always acquired in the same orientations,
# a matching orientation can be ensured by finding the highest
# contributing image axis for each PC, and invert the PC if that
# contribution is negative. In other words, one ensures for each PC
# that the highest-contributing image axis is positively correlated
# with the PC.
# Find highest contributions of image axes to each PC
# Note: This asks "which image axis contributes the most to this PC?"
max_weights = np.argmax(np.abs(pca.components_),axis=1)
# Get the signs of the highest contributions
signs = np.sign(pca.components_[np.arange(pca.components_.shape[0]),max_weights])
# Using the signs, flip those PCs where the sign is negative
pca.components_ = pca.components_ * signs[:, np.newaxis]
# Match the order of PCs to the order of image dimensions (zyx)
# Note: Following the transform, the PCs will be sorted according to
# explained variance. Instead, they should be sorted in order of the
# highest contributing image dimension.
# Find indices for zyx-sorting of transformed data
# Note: This asks "which PC is most contributed to by this image axis?"
zyx_sort = np.argmax(np.abs(pca.components_),axis=0)
# Transform landmarks, sort according to zyx
lms_tf = pca.transform(lms)[:,zyx_sort]
ref_lms_tf = pca.transform(ref_lms)[:,zyx_sort]
# Get PCs and explained variance to report
PCs = np.copy(pca.components_.T)
PCvars = np.copy(pca.explained_variance_ratio_)
# Print results
if verbose:
print '\n PCs:'
print ' ', str(PCs).replace('\n','\n ')
print ' Explained variance:'
print ' ', str(PCvars)
# Plot results
if show:
plt.scatter(lms_tf[:,2], lms_tf[:,1], c=lms_tf[:,0],
cmap='viridis')
plt.title('Channel landmarks in matched frame')
plt.show()
plt.scatter(ref_lms_tf[:,2], ref_lms_tf[:,1], c=ref_lms_tf[:,0],
cmap='viridis')
plt.title('Reference landmarks in matched frame')
plt.show()
#--------------------------------------------------------------------------
### Normalize z
### ...by scaling the 1st and 99th percentile to 0 and 1, respectively.
# Get percentiles
mem_bot = np.percentile(ref_lms_tf[:,0],1)
mem_top = np.percentile(ref_lms_tf[:,0],99)
# Scale
lms_tf[:,0] = (lms_tf[:,0] - mem_bot) / (mem_top - mem_bot)
ref_lms_tf[:,0] = (ref_lms_tf[:,0] - mem_bot) / (mem_top - mem_bot)
#--------------------------------------------------------------------------
### Additional Measure: Distance from Lumen
# Import lumen data
lumen = 'none'
with open(os.path.join(fpath, r"metadata.txt"),"r") as infile:
for line in infile.readlines():
line = line.strip()
line = line.split('\t')
if line[0] in fname:
lumen = np.array([int(value) for value in line[1:4]])
break
if lumen is 'none':
raise Exception("Appropriate lumen metadata not found. Aborting!")
# Change from pixels to resolution
lumen = lumen * np.array(res)
# Get Euclidean distance from lumen
lum_dist_lms = np.sqrt(np.sum((lms-lumen)**2.0, axis=1))
# Transform to PCA space
lumen_tf = pca.transform(lumen.reshape(1,-1))[:,zyx_sort].squeeze()
# Normalization of z
lumen_tf[0] = (lumen_tf[0] - mem_bot) / (mem_top - mem_bot)
# Report
if verbose:
print ' Lumen (raw & tf):'
print ' ', lumen
print ' ', lumen_tf
# Plot to double-check
if show:
plt.scatter(ref_lms[:,2], ref_lms[:,1], c=ref_lms[:,0],
cmap='viridis')
plt.scatter(lumen[2], lumen[1], c='r', s=100)
plt.title('Reference landmarks in image frame (with lumen)')
plt.show()
plt.scatter(ref_lms_tf[:,2], ref_lms_tf[:,1], c=ref_lms_tf[:,0],
cmap='viridis')
plt.scatter(lumen_tf[2], lumen_tf[1], c='r', s=100)
plt.title('Reference landmarks in matched frame (with lumen)')
plt.show()
#--------------------------------------------------------------------------
### Return results
return lms, lms_tf, lum_dist_lms
#------------------------------------------------------------------------------
| 2.046875 | 2 |
Lesson 1. Introduction to Python/example.py | jonblower/python-viz-intro | 51 | 80260 | <reponame>jonblower/python-viz-intro
# A simple Python script that illustrates some of the concepts of
# variable assignment, loops, conditionals, functions and testing
# Load into SciTE and run by pressing F5
# Or rwith "python example.py"
# Variable assignment (NOTE: this teaching script uses Python 2 print syntax)
temperature = 5.0 # a floating-point (decimal) number
numberOfLegs = 2 # an integer (whole number)
name = "Jon" # a string

# Conditionals
if temperature < 0.0:
    print "It's freezing!"
elif temperature > 30.0:
    print "It's hot! (If you're British)"
else:
    print "Not too hot, not too cold"
# Note: there is no "end if"

# Loops
print "Here are the numbers from 0 to 9:"
for i in range(10):
    print i

print "Here's another (longer) way to print the same thing"
i = 0
while i < 10:
    print i
    i = i + 1
# Constrain a longitude value (in degrees) to the half-open range (-180, 180].
def lon180(lon):
    """Return `lon` (degrees) wrapped into the interval (-180, 180]."""
    wrapped = lon % 360  # Python's modulo always yields a value in [0, 360)
    return wrapped - 360 if wrapped > 180 else wrapped
# Here's a function that tests the above routine. It calls lon180 and checks
# the answer is as expected (by eye -- it prints actual vs. expected rather
# than asserting).
def testLon180(lon, expected):
    actual = lon180(lon)
    # str(number) converts a number to a string
    print "lon180(" + str(lon) + ") = " + str(actual) + ". Expected = " + str(expected)
    # Here's another way to print the same information, using something like C's
    # C's printf statement
    #print "lon180(%f) = %f. Expected = %f" % (lon, actual, expected)


# Now test the function. You can probably think of lots more tests
testLon180(-180, 180)
testLon180(360, 0)
testLon180(-190, 170)
| 3.375 | 3 |
code/tpt/tpt1.py | Archkitten/sleep | 0 | 80388 | # Hack 1: InfoDB lists. Build your own/personalized InfoDb with a list length > 3, create list within a list as illustrated with Owns_Cars
blue = "\033[34m"   # ANSI escape: switch terminal text to blue
white = "\033[37m"  # ANSI escape: switch terminal text back to white

InfoDb = []
# List with dictionary records placed in a list
InfoDb.append({
    "FirstName": "Michael",
    "LastName": "Chen",
    "DOB": "December 1",
    "Residence": "San Diego",
    "Email": "<EMAIL>",
    "Owns_Cars":["2016 Ford Focus EV", "2019 Honda Pilot"]
})

InfoDb.append({
    "FirstName": "Ethan",
    "LastName": "Vo",
    "DOB": "Not Born",
    "Residence": "The Moon",
    "Email": "<EMAIL>",
    "Owns_Cars":["Broken Down Golf Cart"]
})

InfoDb.append({
    "FirstName": "Anirudh",
    "LastName": "Ramachandran",
    "DOB": "August 18",
    "Residence": "Uranus",
    "Email": "<EMAIL>",
    "Owns_Cars":["Can't Even Drive"]
})
# Print the InfoDb record at index n: full name plus the list of owned cars.
def print_data(n):
    record = InfoDb[n]
    print(record["FirstName"], record["LastName"])  # the comma puts a space between values
    print("\t", "Cars: ", end="")  # \t indents; end="" suppresses the newline
    print(", ".join(record["Owns_Cars"]))  # join renders the list with a separator
    print()
# Hack 2: InfoDB loops. Print values from the lists using three different ways: for, while, recursion
## hack 2a: def for_loop()
## hack 2b: def while_loop(0)
## hack 2c : def recursive_loop(0)
def for_loop():
    """Print every InfoDb record using a for loop over the index range."""
    for x in range(len(InfoDb)):
        # print(InfoDb[x])
        print_data(x)
def while_loop(x):
    """Print InfoDb records from index x onward using a while loop."""
    while x < len(InfoDb):
        # print(InfoDb[x])
        print_data(x)
        x += 1
def recursive_loop(x):
    """Print InfoDb records from index x onward via recursion (stops past the end)."""
    if x < len(InfoDb):
        # print(InfoDb[x])
        print_data(x)
        recursive_loop(x + 1)
# hack 3: fibonacci
def fibonacci(n):
    """Return the n-th Fibonacci number (fibonacci(0) == 0, fibonacci(1) == 1).

    Iterative rewrite: the original double-recursive version recomputed the
    same subproblems and took exponential time (and recursed without bound
    for negative n); this runs in O(n) and returns 0 for n <= 0.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b  # slide the (fib(i), fib(i+1)) window forward
    return a
def tester2():
    """Interactively prompt for a term count and print that many Fibonacci numbers."""
    try:
        num = int(input("Term of Fibonacci Sequence: ")) # user input
        # check if the number is negative
        if num < 0:
            print("You tested negative for COVID-19! Unfortunately, we only accept postive values at this Wendy's") # negative input
        else:
            print(num, "terms of the Fibonacci Sequence:")
            for i in range(num):
                print(fibonacci(i), end=" ")# list 0-n
            print()
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt -- consider
        # catching ValueError only.
        print("INTEGER INTEGER INTEGER WHAT ARE YOU EVEN DOING") # non-integer input
def tester():
    """Demonstrate all three loop styles over InfoDb, with colored section headers."""
    print(blue + "For loop" + white)
    for_loop()
    print(blue + "While loop" + white)
    while_loop(0) # requires initial index to start while
    print(blue + "Recursive loop" + white)
    recursive_loop(0) # requires initial index to start recursion


# tester2()
# exit()
# hack3()
# tester()
cloud_utils/config/logging_config.py | hyroai/cloud-utils | 1 | 80516 | import contextlib
import contextvars
import logging
import os
import sys
from typing import Iterator
_logging_prefix: contextvars.ContextVar[str] = contextvars.ContextVar(
"logging_prefix",
default="",
)
@contextlib.contextmanager
def logging_prefix(prefix: str) -> Iterator[str]:
token = _logging_prefix.set(prefix)
try:
yield prefix
finally:
_logging_prefix.reset(token)
def _remove_all_handlers() -> None:
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
def _context_filter(record) -> int:
    # Logging filter: enrich each record with the context-local prefix and the
    # host name from $HOSTNAME (falls back to "dev" when unset, e.g. locally),
    # then let the record through (truthy return value).
    record.prefix = _logging_prefix.get()
    record.environment = os.getenv("HOSTNAME", "dev")
    return True
class _MultilineFormatter(logging.Formatter):
def format(self, record: logging.LogRecord):
save_msg = str(record.msg)
output = []
for line in save_msg.splitlines():
record.msg = line
output.append(super().format(record))
output_str = "\n".join(output)
record.msg = save_msg
record.message = output_str
return output_str
def initialize_logger() -> None:
    """Configure root logging: a single stdout handler with the multi-line
    formatter and the context filter that injects prefix/environment."""
    # TODO(erez): change to force=True once we update python version to 3.8.
    _remove_all_handlers()
    formatter = _MultilineFormatter(
        "[%(environment)s] [%(prefix)s] [%(asctime)s,%(msecs)d] [%(levelname)-8s] [%(filename)s:%(lineno)d] %(message)s",
        datefmt="%d-%m-%Y:%H:%M:%S",
    )
    stdout_handler = logging.StreamHandler(stream=sys.stdout)
    stdout_handler.setFormatter(formatter)
    stdout_handler.addFilter(_context_filter)
    logging.basicConfig(level=logging.INFO, handlers=[stdout_handler])


# This needs to be before other imports otherwise not logging to stdout for some reason.
initialize_logger()
| 1.625 | 2 |
ui-test/creat-json/estat.py | dicechick373/statistics-hyogo | 0 | 80644 | import json
import os
import time
import urllib.parse
import urllib.request
import pandas as pd
import pathlib
# Set the root directory (the data directory, two levels up from this file)
root_dir = pathlib.Path(__file__).parent.parent

# Load the prefecture list
c = os.path.join(root_dir, 'codes/preflist.json')
with open(c) as j:
    prefList = json.load(j)

# Load the municipality (city/ward/town/village) list
c = os.path.join(root_dir, 'codes/citylist.json')
with open(c) as j:
    cityList = json.load(j)

# Read the e-Stat APPID from the environment (via a .env file)
from dotenv import load_dotenv
load_dotenv()
ESTAT_APPID = os.getenv('ESTAT_APPID')
# Build the e-Stat API parameter set (prefecture / city level)
def setEstatParams(params,type):
    """Translate a request description into e-Stat getStatsData query params.

    params: dict with 'statsDataId' and optionally 'cdCat01' (list of codes).
    type:   'prefecture'/'prefectureRank' or 'city'/'cityRank' -- selects
            which area codes are placed into 'cdArea'.
    """
    # appId
    p = {'appId':ESTAT_APPID}
    # params['appId']=ESTAT_APPID

    # cdArea: prefecture codes are rendered as 2 zero-padded digits + '000'
    if type == 'prefecture' or type == 'prefectureRank':
        prefCodes = [d.get('prefCode') for d in prefList['result']]
        prefCodesStr = [f'{n:02}'+'000' for n in prefCodes]
        # print(prefCodesStr)
        p['cdArea'] = ','.join(prefCodesStr)

    if type == 'city' or type == 'cityRank':
        cityCodes = [d.get('cityCode') for d in cityList['result']]
        p['cdArea'] = ','.join(cityCodes)
        # print(cityCodes)

    # statsDataId (plus the optional category filter cdCat01)
    p['statsDataId'] = params['statsDataId']
    if('cdCat01' in params):
        p['cdCat01'] = ','.join([d for d in params['cdCat01']])

    return p
# Fetch a response from the e-Stat API
def getEstatAPIResponse(params):
    """GET getStatsData with the given query parameters and return the parsed JSON."""
    # print(params)
    url = 'http://api.e-stat.go.jp/rest/2.1/app/json/getStatsData?'
    url += urllib.parse.urlencode(params)
    # print(url)
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read().decode())
def saveJson(data, downloadPath, **kwargs):
    """Serialize ``data`` as JSON to ``downloadPath``; ``kwargs`` are passed to json.dump."""
    print(f'...Saving {downloadPath}')
    with open(downloadPath, 'w') as out_file:
        json.dump(data, out_file, **kwargs)
| 1.78125 | 2 |
lib/ansible/modules/cloud/alicloud/ali_eip_facts.py | atodorov/ansible-provider | 2 | 80772 | <filename>lib/ansible/modules/cloud/alicloud/ali_eip_facts.py
#!/usr/bin/python
# Copyright (c) 2017 Alibaba Group Holding Limited. <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ali_eip_facts
version_added: "2.8"
short_description: Gather facts about Elastic IP addresses in Alibaba Cloud
description:
- Gather facts about Elastic IP addresses in Alibaba Cloud
options:
eip_ids:
description:
- A list of EIP IDs that exist in your account.
aliases: ['ids']
name_prefix:
description:
- Use a name prefix to filter EIPs.
ip_address_prefix:
description:
- Use a ip address prefix to filter EIPs.
aliases: ['ip_prefix']
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
all of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/36018.htm) for parameter details.
Filter keys can be same as request parameter name or be lower case and use underscore ("_") or dashes ("-") to
connect different words in one parameter. 'AllocationId' will be appended to I(eip_ids) automatically.
author:
- "<NAME> (@xiaozhu36)"
requirements:
- "python >= 2.6"
- "footmark >= 1.9.0"
extends_documentation_fragment:
- alicloud
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the Alibaba Cloud Guide for details.
# Gather facts about all EIPs
- ali_eip_facts:
# Gather facts about a particular EIP
- ali_eip_facts:
eip_ids:
- eip-xxxxxxx
- eip-yyyyyyy
filters:
status: Available
# Gather facts about a particular EIP
- ali_eip_facts:
filters:
associated_instance_type: EcsInstance
# Gather facts based on ip_address_prefix
- ali_eip_facts:
ip_address_prefix: 72.16
'''
RETURN = '''
eips:
description: List of matching elastic ip addresses
returned: always
type: complex
contains:
allocation_id:
description: The EIP id
returned: always
type: string
sample: "eip-2zee1nu68juox4"
allocation_time:
description: The time the EIP was created
returned: always
type: string
sample: "2018-12-31T12:12:52Z"
bandwidth:
description: Maximum bandwidth from the internet network
returned: always
type: int
sample: 5
charge_type:
description: The eip charge type.
returned: always
type: string
sample: "PostPaid"
description:
description: interface description
returned: always
type: string
sample: "My new EIP"
id:
description: Allocated EIP id (alias for allocation_id)
returned: always
type: string
sample: "eip-2zee1nu68juox4"
instance_id:
description: Associated instance id
returned: always
type: string
sample: "i-123456"
instance_region_id:
description: The region id in which the associated instance
returned: always
type: string
sample: "cn-beijing"
instance_type:
description: Associated instance type
returned: always
type: string
sample: "EcsInstance"
internet_charge_type:
description: The EIP charge type.
returned: always
type: string
sample: "PayByTraffic"
ip_address:
description: The IP address of the EIP
returned: always
type: string
sample: "192.168.127.12"
name:
description: The EIP name
returned: always
type: string
sample: "from-ansible"
status:
description: The EIP status
returned: always
type: string
sample: "inuse"
tags:
description: Any tags assigned to the EIP.
returned: always
type: dict
sample: {}
ids:
description: List of elastic ip address IDs
returned: always
type: list
sample: [eip-12345er, eip-3245fs]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import ecs_argument_spec, vpc_connect
# Probe for the optional footmark dependency; main() reports a clean module
# failure when it is missing instead of crashing on import.
HAS_FOOTMARK = False

try:
    from footmark.exception import ECSResponseError
    HAS_FOOTMARK = True
except ImportError:
    HAS_FOOTMARK = False
def main():
    """Entry point of the ali_eip_facts module.

    Queries EIP addresses through footmark, applies the optional
    name-prefix / ip-address-prefix / id filters, and exits with the
    matching ``eips`` facts and their ``ids``.
    """
    argument_spec = ecs_argument_spec()
    argument_spec.update(
        dict(
            eip_ids=dict(type='list', aliases=['ids']),
            name_prefix=dict(),
            ip_address_prefix=dict(type='str', aliases=['ip_prefix']),
            filters=dict(type='dict'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if HAS_FOOTMARK is False:
        module.fail_json(msg='footmark required for the module ali_eip_facts')

    eips = []
    ids = []
    eip_ids = module.params["eip_ids"]
    if not eip_ids:
        eip_ids = []
    filters = module.params["filters"]
    if not filters:
        filters = {}
    new_filters = {}
    for key, value in filters.items():
        # An 'AllocationId' filter (any spelling: 'AllocationId',
        # 'allocation_id', 'allocation-id', ...) is folded into eip_ids
        # instead of being passed through to the API.
        # Bug fix: str.replace() requires (old, new); the original called
        # .replace("-").replace("_") with a single argument, which raised
        # TypeError as soon as any filter was supplied.
        if str(key).lower().replace("-", "").replace("_", "") == "allocationid" and value not in eip_ids:
            eip_ids.append(value)
            continue
        new_filters[key] = value

    name_prefix = module.params["name_prefix"]
    address_prefix = module.params["ip_address_prefix"]

    try:
        for eip in vpc_connect(module).describe_eip_addresses(**new_filters):
            # Client-side filtering on name prefix, IP prefix and explicit ids.
            if name_prefix and not str(eip.name).startswith(name_prefix):
                continue
            if address_prefix and not str(eip.IpAddress).startswith(address_prefix):
                continue
            if eip_ids and eip.allocation_id not in eip_ids:
                continue
            eips.append(eip.read())
            ids.append(eip.id)
        module.exit_json(changed=False, ids=ids, eips=eips)
    except Exception as e:
        module.fail_json(msg=str("Unable to get eips, error:{0}".format(e)))


if __name__ == '__main__':
    main()
| 1.140625 | 1 |
pano/views/api/report_agent_log.py | jeroenzeegers/panopuppet | 0 | 80900 | <reponame>jeroenzeegers/panopuppet
import arrow
import json
from django.contrib.auth.decorators import login_required
from django.shortcuts import HttpResponse
from django.template import defaultfilters as filters
from django.utils.timezone import localtime
from django.views.decorators.cache import cache_page
from pano.puppetdb import puppetdb
from pano.puppetdb.puppetdb import get_server
from pano.settings import CACHE_TIME
__author__ = 'etaklar'
@login_required
@cache_page(CACHE_TIME)
def report_log_json(request, report_hash=None):
    """Return the agent log of one puppet report as JSON.

    Fetches /reports/<hash>/logs from the PuppetDB instance selected for this
    request, normalizes the timestamps to local time, and responds with
    {'agent_log': [...], 'report_hash': ...} (or {'error': ...} on failure).
    """
    source_url, source_certs, source_verify = get_server(request)
    context = {}
    if report_hash is None:
        # Without a report hash there is nothing to look up.
        context['error'] = 'Report Hash not provided.'
        return HttpResponse(json.dumps(context), content_type="application/json")
    report_logs = puppetdb.api_get(
        api_url=source_url,
        cert=source_certs,
        verify=source_verify,
        path='/reports/' + report_hash + '/logs',
        api_version='v4',
    )
    # api_get returns a mapping containing 'error' on failure, a list of log
    # entries on success.
    if 'error' in report_logs:
        context = report_logs
        return HttpResponse(json.dumps(context), content_type="application/json")
    for log in report_logs:
        # Parse... 2015-09-18T18:02:04.753163330+02:00
        # PuppetDB emits a nanosecond fraction (9 digits); Python/arrow only
        # handles microseconds, so keep the first 26 chars (through 6 fraction
        # digits) and re-append the "+HH:MM" offset pieces.
        time = log['time'][0:26] + log['time'][-6:-3] + log['time'][-2:]
        time = arrow.get(time).to('UTC').datetime
        log['time'] = filters.date(localtime(time), 'Y-m-d H:i:s')
    context['agent_log'] = report_logs
    context['report_hash'] = report_hash
    return HttpResponse(json.dumps(context), content_type="application/json")
| 1.351563 | 1 |
CLI/mock_api/utils.py | MeatBoyed/PasswordBank2 | 1 | 81028 | <reponame>MeatBoyed/PasswordBank2
from getpass import getpass
# Utility libary for common functions that every page needs access too. To stop coding same logic processes.
def GetSelection():
    """Prompt the user for a numeric menu selection and return it as an int.

    Re-prompts until a valid integer is entered.
    """
    while True:
        try:
            raw = input(": ")
            if raw == "":
                # BUG FIX: the original converted first and then compared the
                # int against "" (always False) with a copy-pasted site-name
                # message; check the raw string before conversion instead.
                print("Enter a valid option")
                continue
            selection = int(raw)
            break
        except ValueError:
            print("Enter a valid option")
        except Exception as e:
            print("An unexpected error occured!\n", str(e))
    return selection
def GetUsername():
    """Prompt until a non-empty username is entered and return it."""
    while True:
        try:
            username = str(input("Username: "))
        except ValueError:
            print("Enter a valid Username")
            continue
        except Exception as e:
            print("An unexpected error occured!\n", str(e))
            continue
        if username == "":
            print("Username is compulsory!")
        else:
            return username
def GetSitename():
    """Prompt until a non-empty site name is entered and return it."""
    while True:
        try:
            sitename = str(input("Site's name: "))
        except ValueError:
            print("Enter a valid sitename")
            continue
        except Exception as e:
            print("An unexpected error occured!\n", str(e))
            continue
        if sitename == "":
            print("Enter the site's name")
        else:
            return sitename
def GetAccountURL():
    """Prompt for an optional account URL and return it (may be empty).

    The field is optional, so empty input is accepted as-is.
    """
    while True:
        try:
            # Original had an if/else where BOTH branches just broke out of
            # the loop, plus an unreachable `except ValueError` carrying a
            # copy-pasted "Username" message; both removed.
            accountUrl = str(input("url (optional): "))
            break
        except Exception as e:
            print("An unexpected error occured!\n", str(e))
    return accountUrl
def GetEmail():
    """Prompt until a non-empty email address is entered and return it."""
    while True:
        try:
            email = str(input("Email: "))
        except ValueError:
            print("Enter a valid Email")
            continue
        except Exception as e:
            print("An unexpected error occured!\n", str(e))
            continue
        if email == "":
            print("Email is compulsory!")
        else:
            return email
def GetPassword():
    """Prompt for a password twice (hidden input) and return it once both
    entries are non-empty and match. Loops until confirmed."""
    verified = False
    while True:
        try:
            password1 = getpass("Password: ")
            if password1 == "":
                print("Password is compulsory!")
            else:
                # Inner loop: ask for the confirmation entry. A mismatch or
                # empty confirmation breaks back to the outer loop, which
                # restarts the whole two-step prompt from the first password.
                while True:
                    try:
                        password2 = getpass("Re-Enter Password again: ")
                        if password2 == "" or password1 != password2:
                            print("Password entery failed. Try again")
                            break
                        else:
                            verified = True
                            break
                    except ValueError:
                        print("Enter a valid Password")
                    except Exception as e:
                        print("An unexpected error occured!\n", str(e))
            # Only a successful match terminates the outer prompt loop.
            if verified:
                break
        except ValueError:
            print("Enter a valid Password")
        except Exception as e:
            print("An unexpected error occured!\n", str(e))
    return password2
| 2.328125 | 2 |
cvpysdk/instances/sqlinstance.py | Jayesh-Jain/SDK | 0 | 81156 | <filename>cvpysdk/instances/sqlinstance.py
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""File for operating on a SQL Server Instance.
SQLServerInstance is the only class defined in this file.
SQLServerInstance: Derived class from Instance Base class, representing a sql server instance,
and to perform operations on that instance
SQLServerInstance:
_get_instance_properties() -- gets the instance related properties of SQL instance.
_get_instance_properties_json() -- gets all the instance related properties of SQL instance.
_restore_request_json() -- returns the restore request json
_process_restore_response() -- processes response received for the Restore request
_get_sql_restore_options() -- returns the dict containing destination sql server names
_run_backup() -- runs full backup for this subclients and appends the
job object to the return list
_process_browse_request() -- processes response received for Browse request
_recoverypoint_request_json() -- returns a json to be sent to server to create
a recovery point
get_recovery_points() -- lists all the recovery points
_process_recovery_point_request() -- starts the recovery point job and process
the response
backup() -- runs full backup for all subclients associated
with this instance
browse() -- gets the content of the backup for this instance
browse_in_time() -- gets the content of the backup for this instance
in the time range specified
restore() -- runs the restore job for specified
restore_to_destination_server() -- restores the database on destination server
create_recovery_point() -- creates a recovery point on destination server
table_level_restore() -- starts the table level restore job
_table_level_restore_request_json() -- returns a json to be sent to the server for
table level restore job
"""
from __future__ import unicode_literals
import re
import time
import datetime
import threading
from base64 import b64encode
from ..instance import Instance
from ..exception import SDKException
from ..job import Job
from ..constants import SQLDefines
class SQLServerInstance(Instance):
"""Derived class from Instance Base class, representing a SQL Server instance,
and to perform operations on that Instance."""
def _get_instance_properties(self):
"""Gets the properties of this instance.
Raises:
SDKException:
if response is empty
if response is not success
"""
super(SQLServerInstance, self)._get_instance_properties()
self._mssql_instance_prop = self._properties.get('mssqlInstance', {})
def _get_instance_properties_json(self):
"""get the all instance related properties of this instance.
Returns:
dict - all subclient properties put inside a dict
"""
instance_json = {
"instanceProperties":
{
"instance": self._instance,
"instanceActivityControl": self._instanceActivityControl,
"mssqlInstance": self._mssql_instance_prop,
"contentOperationType": 1
}
}
return instance_json
    def _restore_request_json(
            self,
            content_to_restore,
            restore_path=None,
            drop_connections_to_databse=False,
            overwrite=True,
            destination_instance=None,
            to_time=None,
            sql_restore_type=SQLDefines.DATABASE_RESTORE,
            sql_recover_type=SQLDefines.STATE_RECOVER,
            undo_path=None,
            restricted_user=None
    ):
        """Returns the JSON request to pass to the CreateTask API as per the
        options selected by the user.

            Args:
                content_to_restore (list): databases list to restore

                restore_path (list, optional): list of dicts for restore paths
                    of database files

                drop_connections_to_databse (bool, optional): drop connections
                    to database during restore

                overwrite (bool, optional): overwrite database on restore

                destination_instance (str): restore databases to this sql instance

                to_time (str, optional): restore to time

                sql_restore_type (str, optional): type of sql restore state
                    (DATABASE_RESTORE, STEP_RESTORE, RECOVER_ONLY)

                sql_recover_type (str, optional): type of sql restore state
                    (STATE_RECOVER, STATE_NORECOVER, STATE_STANDBY)

                undo_path (str, optional): file path for undo path for sql
                    server standby restore; required when sql_recover_type is
                    STATE_STANDBY

                restricted_user (bool, optional): restore database in
                    restricted user mode

            Returns:
                dict - JSON request to pass to the API

            Raises:
                SDKException:
                    if destination_instance is unknown

                    if STATE_STANDBY is requested without an undo_path
        """
        # Populates self.destination_instances_dict as a side effect.
        self._get_sql_restore_options(content_to_restore)

        if destination_instance is None:
            # Default to an in-place restore on this instance.
            destination_instance = (self.instance_name).lower()
        else:
            if destination_instance not in self.destination_instances_dict:
                raise SDKException(
                    'Instance',
                    '102',
                    'No Instance exists with name: {0}'.format(destination_instance)
                )

        destination_client_id = int(
            self.destination_instances_dict[destination_instance]['clientId']
        )

        destination_instance_id = int(
            self.destination_instances_dict[destination_instance]['instanceId']
        )

        # Base restore task; optional sections are spliced in below.
        request_json = {
            "taskInfo": {
                "associations": [{
                    "clientName": self._agent_object._client_object.client_name,
                    "appName": self._agent_object.agent_name,
                    "instanceName": self.instance_name
                }],
                "task": {
                    "initiatedFrom": 1,
                    "taskType": 1
                },
                "subTasks": [{
                    "subTask": {
                        "subTaskType": 3,
                        "operationType": 1001
                    },
                    "options": {
                        "restoreOptions": {
                            "sqlServerRstOption": {
                                "sqlRecoverType": sql_recover_type,
                                "dropConnectionsToDatabase": drop_connections_to_databse,
                                "overWrite": overwrite,
                                "sqlRestoreType": sql_restore_type,
                                "database": content_to_restore,
                                "restoreSource": content_to_restore
                            },
                            "commonOptions": {
                            },
                            "destination": {
                                "destinationInstance": {
                                    "clientId": destination_client_id,
                                    "instanceName": destination_instance,
                                    "instanceId": destination_instance_id
                                },
                                "destClient": {
                                    "clientId": destination_client_id
                                }
                            }
                        }
                    }
                }]
            }
        }

        # A standby restore is only valid with an undo file path.
        if sql_recover_type == SQLDefines.STATE_STANDBY:
            if undo_path is not None:
                undo_path_dict = {
                    "fileOption": {
                        "mapFiles": {
                            "renameFilesSuffix": undo_path
                        }
                    }
                }
                request_json['taskInfo']['subTasks'][0]['options']['restoreOptions'].update(undo_path_dict)
            else:
                raise SDKException('Instance', '102', 'Failed to set Undo Path for Standby Restore.')

        # Optional on-disk file mapping for the restored database files.
        if restore_path is not None:
            restore_path_dict = {
                "device":
                    restore_path
            }
            request_json['taskInfo']['subTasks'][0]['options']['restoreOptions'][
                'sqlServerRstOption'].update(restore_path_dict)

        # Optional restricted-user (dbo-only) mode.
        if restricted_user is not None:
            restricted_user_dict = {
                "dbOnly":
                    restricted_user
            }
            request_json['taskInfo']['subTasks'][0]['options']['restoreOptions'][
                'sqlServerRstOption'].update(restricted_user_dict)

        # Optional point-in-time restore boundary.
        if to_time is not None:
            to_time_dict = {
                "browseOption": {
                    "timeRange": {
                        "toTimeValue": to_time
                    }
                }
            }
            request_json['taskInfo']['subTasks'][0]['options']['restoreOptions'].update(to_time_dict)

        return request_json
def _process_restore_response(self, request_json):
"""Runs the CreateTask API with the request JSON provided for Restore,
and returns the contents after parsing the response.
Args:
request_json (dict): JSON request to run for the API
Returns:
object - instance of the Job class for this restore job
Raises:
SDKException:
if restore job failed
if response is empty
if response is not success
"""
flag, response = self._commcell_object._cvpysdk_object.make_request(
'POST', self._commcell_object._services['RESTORE'], request_json
)
if flag:
if response.json():
if "jobIds" in response.json():
time.sleep(1)
return Job(self._commcell_object, response.json()['jobIds'][0])
elif "errorCode" in response.json():
error_message = response.json()['errorMessage']
o_str = 'Restore job failed\nError: "{0}"'.format(error_message)
raise SDKException('Instance', '102', o_str)
else:
raise SDKException('Instance', '102', 'Failed to run the restore job')
else:
raise SDKException('Response', '102')
else:
response_string = self._commcell_object._update_response_(response.text)
raise SDKException('Response', '101', response_string)
def _get_sql_restore_options(self, content_to_restore):
"""Runs the SQL/Restoreoptions API with the request JSON provided,
and returns the contents after parsing the response.
Args:
content_to_restore (list): Databases list to restore
Returns:
dict - dictionary consisting of the sql destination server options
Raises:
SDKException:
if failed to get SQL instances
if no instance exits on commcell
if response is empty
if response is not success
"""
contents_dict = []
for content in content_to_restore:
database_dict = {
"databaseName": content
}
contents_dict.append(database_dict)
request_json = {
"restoreDbType": 0,
"sourceInstanceId": int(self.instance_id),
"selectedDatabases": contents_dict
}
webservice = self._commcell_object._services['SQL_RESTORE_OPTIONS']
flag, response = self._commcell_object._cvpysdk_object.make_request(
"POST", webservice, request_json
)
self.destination_instances_dict = {}
if flag:
if response.json():
if 'sqlDestinationInstances' in response.json():
for instance in response.json()['sqlDestinationInstances']:
instances_dict = {
instance['genericEntity']['instanceName'].lower(): {
"instanceId": int(instance['genericEntity']['instanceId']),
"clientId": int(instance['genericEntity']['clientId'])
}
}
self.destination_instances_dict.update(instances_dict)
elif 'error' in response.json():
if 'errorMessage' in response.json()['error']:
error_message = response.json()['error']['errorMessage']
raise SDKException('Instance', '102', error_message)
else:
raise SDKException('Instance', '102', 'No Instance exists on commcell')
else:
raise SDKException('Response', '102')
else:
response_string = self._commcell_object._update_response_(response.text)
raise SDKException('Response', '101', response_string)
return response.json()
def _run_backup(self, subclient_name, return_list):
"""Triggers full backup job for the given subclient, and appends its Job object to list
The SDKExcpetion class instance is appended to the list,
if any exception is raised while running the backup job for the Subclient.
Args:
subclient_name (str): Name of the subclient to trigger the backup for
return_list (list): List to append the job object to
"""
try:
job = self.subclients.get(subclient_name).backup('Full')
if job:
return_list.append(job)
except SDKException as excp:
return_list.append(excp)
def _process_browse_request(self, browse_request, get_full_details=False):
"""Runs the SQL Instance Browse API with the request JSON provided for the operation
specified, and returns the contents after parsing the response.
Args:
browse_request (dict): JSON request to be sent to Server
Returns:
list - list of all databases
dict - database names along with details like backup created time
and database version
Raises:
SDKException:
if response is empty
if response is not success
"""
flag, response = self._commcell_object._cvpysdk_object.make_request("GET", browse_request)
full_result = []
databases = []
if flag:
if response.json():
if 'sqlDatabase' in response.json():
# returns whole dict if requested
if get_full_details:
return response.json()["sqlDatabase"]
for database in response.json()['sqlDatabase']:
database_name = database['databaseName']
created_time = datetime.datetime.fromtimestamp(
int(database['createdTime'])
).strftime('%d-%m-%Y %H:%M:%S')
version = database['version']
temp = {
database_name: [created_time, version]
}
databases.append(database_name)
full_result.append(temp)
return databases, full_result
else:
raise SDKException('Response', '102')
else:
response_string = self._commcell_object._update_response_(response.text)
raise SDKException('Response', '101', response_string)
def backup(self):
"""Run full backup job for all subclients in this instance.
Returns:
list - list containing the job objects for the full backup jobs started for
the subclients in the backupset
"""
return_list = []
thread_list = []
all_subclients = self.subclients._subclients
if all_subclients:
for subclient in all_subclients:
thread = threading.Thread(
target=self._run_backup, args=(subclient, return_list)
)
thread_list.append(thread)
thread.start()
for thread in thread_list:
thread.join()
return return_list
def browse(self, get_full_details=False):
"""Gets the list of the backed up databases for this instance.
Args:
get_full_details (bool) - if True returns dict with all databases
with last full backupjob details, default false
Returns:
list - list of all databases
dict - database names along with details like backup created time
and database version
Raises:
SDKException:
if response is empty
if response is not success
"""
browse_request = self._commcell_object._services['INSTANCE_BROWSE'] % (
self._agent_object._client_object.client_id, "SQL", self.instance_id
)
return self._process_browse_request(browse_request, get_full_details=get_full_details)
def browse_in_time(self, from_date=None, to_date=None):
"""Gets the list of the backed up databases for this instance in the given time frame.
Args:
from_date (str): date to get the contents after. Format: dd/MM/YYYY
Gets contents from 01/01/1970 if not specified. Defaults to None.
to_date (str): date to get the contents before. Format: dd/MM/YYYY
Gets contents till current day if not specified. Defaults to None.
Returns:
list - list of all databases
dict - database names along with details like backup created timen
and database version
Raises:
SDKException:
if response is empty
if response is not success
"""
if from_date and (from_date != '01/01/1970' and from_date != '1/1/1970'):
temp = from_date.split('/')
if (len(temp) == 3 and
0 < int(temp[0]) < 32 and
0 < int(temp[1]) < 13 and
int(temp[2]) > 1969 and
(re.search(r'\d\d/\d\d/\d\d\d\d', from_date) or
re.search(r'\d/\d/\d\d\d\d', from_date))):
from_date = int(time.mktime(time.strptime(from_date, '%d/%m/%Y')))
else:
raise SDKException('Instance', '103')
else:
from_date = 0
if to_date and (to_date != '01/01/1970' and to_date != '1/1/1970'):
temp = to_date.split('/')
if (len(temp) == 3 and
0 < int(temp[0]) < 32 and
0 < int(temp[1]) < 13 and
int(temp[2]) > 1969 and
(re.search(r'\d\d/\d\d/\d\d\d\d', to_date) or
re.search(r'\d/\d/\d\d\d\d', to_date))):
today = time.strftime('%d/%m/%Y')
if today == to_date:
to_date = int(time.time())
else:
to_date = int(time.mktime(time.strptime(to_date, '%d/%m/%Y')))
else:
raise SDKException('Instance', '103')
else:
to_date = int(time.time())
browse_request = self._commcell_object._services['INSTANCE_BROWSE'] % (
self._agent_object._client_object.client_id, "SQL", self.instance_id
)
browse_request += '?fromTime={0}&toTime={1}'.format(from_date, to_date)
return self._process_browse_request(browse_request)
def get_recovery_points(self):
"""
lists all the recovery points.
returns:
object (list) - list of all the recovery points and clones
"""
flag, response = self._commcell_object._cvpysdk_object.make_request(
'GET', self._commcell_object._services["SQL_CLONES"], None
)
if flag:
response_json = response.json()
if "rpObjectList" in response_json:
return response_json["total"], response_json["rpObjectList"]
return 0, None
raise SDKException('Response', '102', "failed to get recovery points")
def _recoverypoint_request_json(self,
dbname,
expire_days=1,
recovery_point_name=None,
point_in_time=0,
destination_instance=None,
snap=False
):
"""
creates and returns a request json for the recovery point creation
Args:
dbname (str) -- database to be restored
expire_days (int) -- days for which the database will be restored
default 1,. 1 day
recovery_point_name (str) -- name of the recovery point to be created
default None. creates a db with db_name + <timestamp>
point_in_time (timestamp) -- unix time for the point in time recovery point creation
default 0. performs restore to last backup
destination_instance (str) -- name of the destination instance in whcih recovery point is to be
created.
default None. creates in the same instance
snap (bool) -- If the recovery point to be created is for snap setup
default False
returns:
request_json (Dict) -- request json for create recovery points
"""
if recovery_point_name is None:
timestamp = datetime.datetime.timestamp(datetime.datetime.now())
recovery_point_name = dbname + str(int(timestamp))
instance = self
if destination_instance is not None:
instance = SQLServerInstance(self._agent_object, destination_instance)
# fetching db details
flag, response = self._commcell_object._cvpysdk_object.make_request(
'GET', self._commcell_object._services["SQL_DATABASES"] % dbname, None
)
db_details = None
if flag:
response = response.json()
db_details = response["SqlDatabase"][0]
else:
raise SDKException('Response', 102, "failed to fetch db details")
# retrieving the physical paths and logical file names
restore_options = self._get_sql_restore_options([dbname])
physical_files = []
logical_files = []
for files in restore_options["sqlDbdeviceItem"]:
physical_files.append(files["fileName"])
logical_files.append(files["logicalFileName"])
job = self._commcell_object.job_controller.get(db_details["jobId"])
# fetching job details
browse_dict = self.browse(get_full_details=True)
fullbackup_job = None
for dbs in browse_dict:
if dbs["databaseName"] == dbname:
fullbackup_job = dbs["jobId"]
break
if fullbackup_job is None:
raise Exception("failed to get last full backup job details")
request_json = {
"opType": 0,
"session": {},
"queries": [
{
"type": 0,
"queryId": "0"
}
],
"mode": {
"mode": 3
},
"advOptions": {
"copyPrecedence": 0,
"advConfig": {
"extendedConfig": {
"browseAdvConfigLiveBrowse": {
"useISCSIMount": False
}
},
"applicationMining": {
"appType": 81,
"agentVersion": 0,
"isApplicationMiningReq": True,
"browseInitReq": {
"database": dbname,
"bCreateRecoveryPoint": True,
"destDatabase": recovery_point_name,
"appMinType": 2 if not snap else 0,
"expireDays": expire_days,
"instance": {
"clientId": int(self._agent_object._client_object.client_id),
"instanceName": instance.instance_name,
"instanceId": int(instance.instance_id),
"applicationId": 81
},
"miningJobs": [fullbackup_job],
"client": {
"clientId": int(db_details["cId"])
},
"phyfileRename": physical_files,
"logfileRename": logical_files,
}
}
}
},
"ma": {
"clientId": int(self._agent_object._client_object.client_id)
},
"options": {
"instantSend": True,
"skipIndexRestore": False
},
"entity": {
"drivePoolId": 0,
"subclientId": job.details["jobDetail"]["generalInfo"]["subclient"]["subclientId"],
"applicationId": 81,
"libraryId": job.details["jobDetail"]["generalInfo"]["mediaLibrary"]["libraryId"],
"backupsetId": job.details["jobDetail"]["generalInfo"]["subclient"]["backupsetId"],
"instanceId": int(self.instance_id),
"clientId": db_details["cId"]
},
"timeRange": {
"fromTime": 0,
"toTime": point_in_time
}
}
return request_json
def _process_recovery_point_request(self, request_json):
"""
process the create recovery job browse request
Args:
request_json (dict): JSON request to run for the API
Returns:
object (Job) - instance of the Job class for this restore job
recovery point Id (int) : id to uniquely access the recovery point
dbname (str) - name of the db that is created.
Raises:
SDKException:
if restore job failed
if response is empty
if response is not success
"""
flag, response = self._commcell_object._cvpysdk_object.make_request(
'POST', self._commcell_object._services['BROWSE'], request_json
)
if flag:
response_json = response.json()
if response_json:
if "browseResponses" in response_json:
d = response_json['browseResponses'][0]["browseResult"]["advConfig"]["applicationMining"]["browseInitResp"]
try:
return Job(self._commcell_object, d["recoveryPointJobID"]), d["recoveryPointID"], d["edbPath"]
except Exception as msg:
# server code 102 response is empty or doesn't contain required parameters
raise SDKException('Instance', 102, msg)
elif "errorCode" in response.json():
error_message = response.json()['errorMessage']
o_str = 'create recovery point job failed\nError: "{0}"'.format(error_message)
raise SDKException('Instance', '102', o_str)
else:
raise SDKException('Instance', '102', 'Failed to run the restore job')
else:
raise SDKException('Response', '102')
else:
response_string = self._commcell_object._update_response_(response.text)
raise SDKException('Response', '101', response_string)
def create_recovery_point(self,
database_name,
new_database_name=None,
destination_instance=None,
expire_days=1,
snap=False
):
"""stats a granular restore or recovery point job and creates a on demand restore of a database
agrs:
database_name (str) : Name of the database for granular restore
new_database_name (str) : Name of the newly created database database
default: None creates a database with original dbname+ <TIMESTAMP>
destination_instance (str): Destination server(instance) name.
default None .creates a database in the same instance
expire_days (int) : days for which the database will be available
default 1 day.
snap (bool) : create recovery point for the snap setup
dafault False
returns:
object (Job) : instance of the Job class for this restore job
recovery point Id (int) : id to uniquely access the recovery point
recovery_point_name (str) : name of the database created
"""
# write a wrapper over this to allow creating more than one recovery points at a time is neccessary
if not isinstance(database_name, str):
raise SDKException('Instance', '101')
if destination_instance is None:
destination_instance = self.instance_name
else:
destination_instance = destination_instance.lower()
recoverypoint_request = self._recoverypoint_request_json(
database_name,
expire_days=expire_days,
recovery_point_name=new_database_name,
destination_instance=destination_instance,
snap=snap
)
return self._process_recovery_point_request(recoverypoint_request)
def table_level_restore(self,
src_db_name,
tables_to_restore,
destination_db_name,
rp_name,
include_child_tables,
include_parent_tables):
"""Starts a table level restore
Args:
src_db_name(str) : Name of the source database
tables_to_restore(list) : List of tables to restore
destination_db_name(str) : Destination database name
rp_name(str) : Name of recovery point
include_child_tables(bool) : Includes all child tables in restore.
include_parent_tables(bool) : Includes all parent tables in restore.
Returns:
job : Instance of Job class for this restore job"""
if not (isinstance(src_db_name, str)
or isinstance(tables_to_restore, list)
or isinstance(destination_db_name, str)):
raise SDKException('Instance', '101')
request_json = self._table_level_restore_request_json(
src_db_name,
tables_to_restore,
destination_db_name,
rp_name,
include_child_tables,
include_parent_tables
)
return self._process_restore_response(request_json)
    def _table_level_restore_request_json(self,
                                          src_db,
                                          tables_to_restore,
                                          destination_db,
                                          rp_name,
                                          include_child_tables,
                                          include_parent_tables):
        """Creates and returns a request json for a table level restore.

            Args:
                src_db (str): name of the source database

                tables_to_restore (list): list of tables to restore

                destination_db (str): destination database name

                rp_name (str): name of the corresponding recovery point

                include_child_tables (bool): includes all child tables in restore

                include_parent_tables (bool): includes all parent tables in restore

            Returns:
                request_json (dict): request json for table level restore
        """
        client_name = self._agent_object._client_object.client_name
        client_id = int(self._agent_object._client_object.client_id)
        instance_name = self.instance_name
        instance_id = int(self.instance_id)

        # Table paths are passed to the browse/restore engine as '/<table>'.
        source_item = []
        for table in tables_to_restore:
            source_item.append('/' + table)

        # NOTE(review): -1 / 0 entity ids appear to mean "not applicable";
        # applicationId 81 is the SQL Server agent id used throughout this
        # module. Confirm against the server-side task schema.
        request_json = {
            "taskInfo": {
                "associations": [
                    {
                        "subclientId": -1,
                        "copyId": 0,
                        "applicationId": 81,
                        "clientName": client_name,
                        "backupsetId": -1,
                        "instanceId": instance_id,
                        "clientId": client_id,
                        "instanceName": instance_name,
                        "_type_": 5,
                        "appName": self._agent_object.agent_name
                    }
                ],
                "task": {
                    "ownerId": 1,
                    "taskType": 1,
                    "ownerName": "admin",
                    "sequenceNumber": 0,
                    "initiatedFrom": 1,
                    "policyType": 0,
                    "taskId": 0,
                    "taskFlags": {
                        "isEZOperation": False,
                        "disabled": False
                    }
                },
                "subTasks": [
                    {
                        "subTask": {
                            "subTaskType": 3,
                            "operationType": 1001
                        },
                        "options": {
                            "adminOpts": {
                                "contentIndexingOption": {
                                    "subClientBasedAnalytics": False
                                }
                            },
                            "restoreOptions": {
                                "virtualServerRstOption": {
                                    "isBlockLevelReplication": False
                                },
                                "sqlServerRstOption": {
                                    "cloneEnv": False,
                                    "ffgRestore": False,
                                    "cloneResrvTimePeriod": 0,
                                    "vSSBackup": False,
                                },
                                # Table-level (archive) restore specifics:
                                # which tables, from which recovery point,
                                # into which database.
                                "dbArchiveRestoreOptions": {
                                    "restoreAllDependentTables": include_child_tables,
                                    "isTableLevelRestore": True,
                                    "destDatabaseName": destination_db,
                                    "restoreToSourceDatabase": True,
                                    "restoreToHistoryDatabase": False,
                                    "restoreAllParentTables": include_parent_tables,
                                    "databaseName": {
                                        "clientId": client_id,
                                        "instanceName": instance_name,
                                        "instanceId": instance_id,
                                        "applicationId": 81
                                    },
                                    "sqlArchiveOptions": {
                                        "sourceDBName": src_db,
                                        "sourceDatabaseInfo": {
                                            "dbName": rp_name,
                                            "instance": {
                                                "clientId": client_id,
                                                "instanceName": instance_name,
                                                "instanceId": instance_id,
                                                "applicationId": 81
                                            }
                                        }
                                    }
                                },
                                "browseOption": {
                                    "listMedia": False,
                                    "useExactIndex": False,
                                    "noImage": True,
                                    "commCellId": self._commcell_object.commcell_id,
                                    "mediaOption": {
                                        "useISCSIMount": False,
                                        "mediaAgent": {
                                            "mediaAgentId": 0,
                                            "_type_": 11
                                        },
                                        "library": {
                                            "_type_": 9,
                                            "libraryId": 0
                                        },
                                        "copyPrecedence": {
                                            "copyPrecedenceApplicable": False
                                        },
                                        "drivePool": {
                                            "_type_": 47,
                                            "drivePoolId": 0
                                        }
                                    },
                                    "backupset": {
                                        "backupsetId": -1,
                                        "clientId": client_id
                                    },
                                    "timeRange": {}
                                },
                                "commonOptions": {
                                    "clusterDBBackedup": False,
                                    "restoreToDisk": False,
                                    "isDBArchiveRestore": True,
                                    "copyToObjectStore": False,
                                    "onePassRestore": False,
                                    "syncRestore": False
                                },
                                "destination": {
                                    "destClient": {
                                        "clientId": client_id,
                                        "clientName": client_name
                                    }
                                },
                                "fileOption": {
                                    "sourceItem": source_item,
                                    "browseFilters": [
                                        "<?xml version='1.0' encoding='UTF-8'?>"
                                        "<databrowse_Query type=\"0\" queryId=\"0\" />"
                                    ]
                                },
                                "dbDataMaskingOptions": {
                                    "isStandalone": False
                                }
                            },
                            "commonOpts": {
                                "notifyUserOnJobCompletion": False,
                                "perfJobOpts": {
                                    "rstPerfJobOpts": {
                                        "mediaReadSpeed": False,
                                        "pipelineTransmittingSpeed": False
                                    }
                                }
                            }
                        }
                    }
                ]
            }
        }

        return request_json
def restore(
self,
content_to_restore,
drop_connections_to_databse=False,
overwrite=True,
restore_path=None,
to_time=None,
sql_restore_type=SQLDefines.DATABASE_RESTORE,
sql_recover_type=SQLDefines.STATE_RECOVER,
undo_path=None,
restricted_user=None,
destination_instance=None
):
"""Restores the databases specified in the input paths list.
Args:
content_to_restore (list): List of databases to restore.
drop_connections_to_databse (bool): Drop connections to database. Defaults to False.
overwrite (bool): Unconditional overwrite files during restore. Defaults to True.
restore_path (str): Existing path on disk to restore. Defaults to None.
to_time (str): Restore to time. Defaults to None.
sql_recover_type (str): Type of sql recovery state. (STATE_RECOVER, STATE_NORECOVER, STATE_STANDBY)
Defaults to STATE_RECOVER.
sql_restore_type (str): Type of sql restore state. (DATABASE_RESTORE, STEP_RESTORE, RECOVER_ONLY)
Defaults to DATABASE_RESTORE.
undo_path (str): File path for undo path for sql standby restores. Defaults to None.
restricted_user (bool): Restore database in restricted user mode. Defaults to None.
destination_instance (str): Destination instance to restore too. Defaults to None.
Returns:
object - instance of the Job class for this restore job
Raises:
SDKException:
if content_to_restore is not a list
if response is empty
if response is not success
"""
if not isinstance(content_to_restore, list):
raise SDKException('Instance', '101')
if destination_instance is not None:
destination_instance = destination_instance.lower()
request_json = self._restore_request_json(
content_to_restore,
drop_connections_to_databse=drop_connections_to_databse,
overwrite=overwrite,
restore_path=restore_path,
to_time=to_time,
sql_restore_type=sql_restore_type,
sql_recover_type=sql_recover_type,
undo_path=undo_path,
restricted_user=restricted_user,
destination_instance=destination_instance
)
return self._process_restore_response(request_json)
def restore_to_destination_server(
        self,
        content_to_restore,
        destination_server,
        drop_connections_to_databse=False,
        overwrite=True,
        restore_path=None):
    """Restores the databases given in the input list to another server.

    Args:
        content_to_restore (list): Databases to restore.
        destination_server (str): Destination server (instance) name.
        drop_connections_to_databse (bool): Drop connections to database. Defaults to False.
        overwrite (bool): Unconditional overwrite of files during restore. Defaults to True.
        restore_path (str): Existing path on disk to restore to. Defaults to None.

    Returns:
        object - instance of the Job class for this restore job

    Raises:
        SDKException:
            if content_to_restore is not a list

            if response is empty

            if response is not success
    """
    if not isinstance(content_to_restore, list):
        raise SDKException('Instance', '101')

    # build the restore request, targeting the given server as the
    # destination instance, then hand it off for execution
    return self._process_restore_response(
        self._restore_request_json(
            content_to_restore,
            drop_connections_to_databse=drop_connections_to_databse,
            overwrite=overwrite,
            restore_path=restore_path,
            destination_instance=destination_server
        )
    )
@property
def mssql_instance_prop(self):
    """Read-only view of the stored SQL Server instance properties.

    Returns the value kept on this object; use the matching setter to
    apply property changes to the instance.
    """
    return self._mssql_instance_prop
@mssql_instance_prop.setter
def mssql_instance_prop(self, value):
    """Apply a category of SQL Server instance properties.

    Args:
        value (list) -- two items: the properties category to update and
            the properties payload to send for that category
    """
    category_name, properties = value
    self._set_instance_properties(category_name, properties)
def vss_option(self, value):
    """Turn the VSS option of this SQL instance on or off.

    Args:
        value (bool) -- True to enable VSS, False to disable it
    """
    self._set_instance_properties("_mssql_instance_prop", {"useVss": value})
def vdi_timeout(self, value):
    """Set the SQL VDI timeout on this SQL instance.

    Args:
        value (int) -- timeout, used for SQL VDI operations
    """
    self._set_instance_properties("_mssql_instance_prop", {"vDITimeOut": value})
def impersonation(self, enable, username=None, password=None):
    """Configure impersonation on the SQL instance.

    When enabled without a user, the local system account is used.  When a
    user is supplied, a password is required and is sent base64-encoded.

    Args:
        enable (bool) -- whether impersonation should be turned on
        username (str, optional) -- account to impersonate; local system
            account is used when omitted and *enable* is True
        password (str, optional) -- password for *username*

    Raises:
        SDKException: if a username is given without a password
    """
    if not enable:
        # impersonation turned off entirely
        override = {
            "overrideGlobalAuthentication": enable,
            "useLocalSystemAccount": False
        }
    elif username is None:
        # enabled with no explicit user: fall back to the local system account
        override = {
            "overrideGlobalAuthentication": enable,
            "useLocalSystemAccount": True
        }
    elif password is None:
        raise SDKException(
            'Instance',
            '102',
            'Please provide password to set impersonation for user [{0}]'.format(username)
        )
    else:
        override = {
            "overrideGlobalAuthentication": enable,
            "useLocalSystemAccount": False,
            "userAccount": {
                "userName": username,
                "password": b64encode(password.encode()).decode()
            }
        }

    impersonate_json = {"overrideHigherLevelSettings": override}
    self._set_instance_properties("_mssql_instance_prop", impersonate_json)
| 1.21875 | 1 |
music21/figuredBass/examples.py | cuthbertLab/music21 | 1,449 | 81284 | <filename>music21/figuredBass/examples.py
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: examples.py
# Purpose: music21 class which allows running of test cases
# Authors: <NAME>
#
# Copyright: Copyright © 2010-2011 <NAME> and the music21 Project
# License: BSD, see license.txt
# ------------------------------------------------------------------------------
'''
Each of the example methods in this module provides a figured bass line as a
:class:`~music21.figuredBass.realizer.FiguredBassLine` instance.
These can be realized by calling
:meth:`~music21.figuredBass.realizer.FiguredBassLine.realize`, which takes in an
optional :class:`~music21.figuredBass.rules.Rules` object.
The result is a :class:`~music21.figuredBass.realizer.Realization`
object which can generate realizations as instances of
:class:`~music21.stream.Score`. These realizations can then be displayed
in external software such as MuseScore or Finale by
calling :meth:`~music21.base.Music21Object.show`.
'''
import copy
import unittest
from music21.figuredBass import realizer
from music21.figuredBass import rules
# ------------------------------------------------------------------------------
def exampleA():
    '''
    This example was a homework assignment for 21M.302: Harmony & Counterpoint II
    at MIT in the fall of 2010, taught by <NAME> of the MIT Music Program.

    >>> from music21.figuredBass import examples
    >>> fbLine = examples.exampleA()
    >>> #_DOCS_SHOW fbLine.generateBassLine().show()

    .. image:: images/figuredBass/fbExamples_bassLineA.*
        :width: 700

    The following is a realization of fbLine in four parts using the default rules set.
    The soprano part is limited to stepwise motion, and the alto and tenor parts are
    limited to motions within a perfect octave.

    >>> from music21.figuredBass import rules
    >>> fbRules = rules.Rules()
    >>> fbRules.partMovementLimits = [(1, 2), (2, 12), (3, 12)]
    >>> fbRealization1 = fbLine.realize(fbRules)
    >>> fbRealization1.getNumSolutions()
    360
    >>> #_DOCS_SHOW fbRealization1.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol1A.*
        :width: 700

    Now, the restriction on upper parts being within a perfect octave of each other is
    removed, and fbLine is realized again.

    >>> fbRules.upperPartsMaxSemitoneSeparation = None
    >>> fbRealization2 = fbLine.realize(fbRules)
    >>> fbRealization2.keyboardStyleOutput = False
    >>> fbRealization2.getNumSolutions()
    3713168
    >>> #_DOCS_SHOW fbRealization2.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol2A.*
        :width: 700
    '''
    from music21 import converter
    # figures are attached to bass notes with "_" in tinynotation,
    # e.g. D2_6 is a D half note carrying a figure of 6
    s = converter.parse("tinynotation: 3/2 C2 D2_6 E2_6 F2_6 C#2_b7,5,3 D2 "
                        "BB2_#6,5,3 C2_6 AA#2_7,5,#3 BB1_6,4 BB2_7,#5,#3 E1.",
                        makeNotation=False)
    return realizer.figuredBassFromStream(s)
def exampleD():
    '''
    This example was a homework assignment for 21M.302: Harmony & Counterpoint II
    at MIT in the fall of 2010, taught by <NAME> of the MIT Music Program.

    >>> from music21.figuredBass import examples
    >>> fbLine = examples.exampleD()
    >>> #_DOCS_SHOW fbLine.generateBassLine().show()

    .. image:: images/figuredBass/fbExamples_bassLineD.*
        :width: 700

    The following is a realization of fbLine in four parts using the default rules set.
    The soprano part is limited to stepwise motion, and the alto and tenor parts are
    limited to motions within a perfect octave.

    >>> from music21.figuredBass import rules
    >>> fbRules = rules.Rules()
    >>> fbRules.partMovementLimits = [(1, 2), (2, 12), (3, 12)]
    >>> fbRealization1 = fbLine.realize(fbRules)
    >>> fbRealization1.getNumSolutions()
    1560
    >>> #_DOCS_SHOW fbRealization1.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol1D.*
        :width: 700

    Now, the restriction on voice overlap is lifted, which is common in keyboard-style
    figured bass, and fbLine is realized again. Voice overlap can be seen in the fourth
    measure.

    >>> fbRules.forbidVoiceOverlap = False
    >>> fbRealization2 = fbLine.realize(fbRules)
    >>> fbRealization2.getNumSolutions()
    109006
    >>> #_DOCS_SHOW fbRealization2.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol2D.*
        :width: 700

    Now, the restriction on voice overlap is reset, but the restriction on the upper parts
    being within a perfect octave of each other is removed. fbLine is realized again.

    >>> fbRules.forbidVoiceOverlap = True
    >>> fbRules.upperPartsMaxSemitoneSeparation = None
    >>> fbRealization3 = fbLine.realize(fbRules)
    >>> fbRealization3.getNumSolutions()
    29629539
    >>> fbRealization3.keyboardStyleOutput = False
    >>> #_DOCS_SHOW fbRealization3.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol3D.*
        :width: 700
    '''
    from music21 import converter
    from music21 import key
    # bass line in B minor; key signature inserted explicitly below
    s = converter.parse("tinynotation: 3/4 BB4 C#4_#6 D4_6 E2 E#4_7,5,#3 F#2_6,4 "
                        "F#4_5,#3 G2 E4_6 F#2_6,4 E4_#4,2 D2_6 EE4_7,5,#3 AA2.",
                        makeNotation=False)
    s.insert(0, key.Key('b'))
    return realizer.figuredBassFromStream(s)
def exampleB():
    '''
    This example was retrieved from page 114 of *The Music Theory Handbook* by <NAME>.

    >>> from music21.figuredBass import examples
    >>> fbLine = examples.exampleB()
    >>> #_DOCS_SHOW fbLine.generateBassLine().show()

    .. image:: images/figuredBass/fbExamples_bassLineB.*
        :width: 700

    First, fbLine is realized with the default rules set.

    >>> fbRealization1 = fbLine.realize()
    >>> fbRealization1.getNumSolutions()
    422
    >>> #_DOCS_SHOW fbRealization1.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol1B.*
        :width: 700

    Now, a Rules object is created, and the restriction that the chords
    need to be complete is lifted. fbLine is realized once again.

    >>> from music21.figuredBass import rules
    >>> fbRules = rules.Rules()
    >>> fbRules.forbidIncompletePossibilities = False
    >>> fbRealization2 = fbLine.realize(fbRules)
    >>> fbRealization2.getNumSolutions()
    188974
    >>> #_DOCS_SHOW fbRealization2.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol2B.*
        :width: 700
    '''
    from music21 import converter
    from music21 import key
    # short bass line in D minor
    s = converter.parse("tinynotation: 4/4 D4 A4_7,5,#3 B-4 F4_6 G4_6 AA4_7,5,#3 D2",
                        makeNotation=False)
    s.insert(0, key.Key('d'))
    return realizer.figuredBassFromStream(s)
def exampleC():
    '''
    This example was retrieved from page 114 of *The Music Theory Handbook* by <NAME>.

    >>> from music21.figuredBass import examples
    >>> fbLine = examples.exampleC()
    >>> #_DOCS_SHOW fbLine.generateBassLine().show()

    .. image:: images/figuredBass/fbExamples_bassLineC.*
        :width: 700

    First, fbLine is realized with the default rules set.

    >>> fbRealization1 = fbLine.realize()
    >>> fbRealization1.getNumSolutions()
    833
    >>> #_DOCS_SHOW fbRealization1.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol1C.*
        :width: 700

    Now, parallel fifths are allowed in realizations. The image below
    shows one of them. There is a parallel fifth between the bass and
    alto parts going from the half-diminished 6,5 (B,F#) to the dominant
    seventh (C#,G#) in the second measure.

    >>> from music21.figuredBass import rules
    >>> fbRules = rules.Rules()
    >>> fbRules.forbidParallelFifths = False
    >>> fbRealization2 = fbLine.realize(fbRules)
    >>> fbRealization2.getNumSolutions()
    2427
    >>> #_DOCS_SHOW fbRealization2.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_sol2C.*
        :width: 700
    '''
    from music21 import converter
    from music21 import key
    # bass line in F-sharp minor
    s = converter.parse("tinynotation: 4/4 FF#4 GG#4_#6 AA4_6 FF#4 BB4_6,5 C#4_7,5,#3 F#2",
                        makeNotation=False)
    s.insert(0, key.Key('f#'))
    return realizer.figuredBassFromStream(s)
def V43ResolutionExample():
    '''
    The dominant 4,3 can resolve to either the tonic 5,3 or tonic 6,3. The proper resolution
    is dependent on the bass note of the tonic, and is determined in context, as shown in the
    following figured bass realization.

    >>> from music21.figuredBass import examples
    >>> fbLine = examples.V43ResolutionExample()
    >>> fbRealization = fbLine.realize()
    >>> #_DOCS_SHOW fbRealization.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_V43.*
        :width: 350
    '''
    from music21 import converter
    from music21 import key
    # both 4,3 chords share the same bass (E) but resolve differently in context
    s = converter.parse("tinynotation: 4/4 D2 E2_4,3 D2_5,3 E2_4,3 F#1_6,3", makeNotation=False)
    s.insert(0, key.Key('D'))
    return realizer.figuredBassFromStream(s)
def viio65ResolutionExample():
    '''
    For a fully diminished seventh chord resolving to the tonic, the resolution chord
    can contain either a doubled third (standard resolution) or a doubled tonic (alternate
    resolution), depending on whether the third of the diminished chord rises or falls.
    The user can control this in a Rules object by modifying
    :attr:`~music21.figuredBass.rules.Rules.doubledRootInDim7`.
    However, when resolving a diminished 6,5, the third is found in the bass and the
    proper resolution is determined in context, regardless of user preference.

    The following shows both cases involving a diminished 6,5. The resolution of the
    first diminished chord has a doubled D, while that of the second has a doubled F#.
    Notice that the resolution of the first involves a diminished fifth (E, Bb) going
    to a perfect fifth (D, A).

    >>> from music21.figuredBass import examples
    >>> fbLine = examples.viio65ResolutionExample()
    >>> fbRealization = fbLine.realize()
    >>> #_DOCS_SHOW fbRealization.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_vii65.*
        :width: 700
    '''
    from music21 import converter
    from music21 import key
    # the two E2_6,b5 chords resolve down to D and up to F# respectively
    s = converter.parse("tinyNotation: 4/4 D2 E2_6,b5 D2 E2_6,b5 F#1_6", makeNotation=False)
    s.insert(0, key.Key('D'))
    return realizer.figuredBassFromStream(s)
def augmentedSixthResolutionExample():
    '''
    This example was retrieved from page 61 of *The Music Theory Handbook* by <NAME>.

    Italian (8,#6,3), French (#6,4,3), German (#6,5,3), and Swiss (#6,#4,3)
    augmented sixth resolutions to
    either the major dominant or the major/minor tonic 6,4 are supported.
    The first four bars show the
    resolutions to the dominant in the order above, while the last bar
    shows the German augmented sixth
    resolving to the tonic.

    >>> from music21.figuredBass import examples
    >>> fbLine = examples.augmentedSixthResolutionExample()
    >>> fbRealization = fbLine.realize()
    >>> #_DOCS_SHOW fbRealization.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_a6.*
        :width: 700
    '''
    from music21 import converter
    from music21 import key
    # one bar per augmented-sixth flavor: Italian, French, German, Swiss,
    # then German resolving to the tonic 6,4
    s = converter.parse("tinynotation: 4/4 D4 BB-4_8,#6,3 AA2_# D4 BB-4_#6,4,3 "
                        "AA2_# D4 BB-4_#6,5,3 AA2_# D4 BB-4_#6,#4,3 AA2_# D4 "
                        "BB-4_#6,5,3 AA2_6,4",
                        makeNotation=False)
    s.insert(0, key.Key('d'))
    return realizer.figuredBassFromStream(s)
def italianA6ResolutionExample():
    '''
    The Italian augmented sixth chord (It+6) is the only
    augmented sixth chord to consist of only three
    pitch names, and when represented in four parts, the
    tonic is doubled. The tonic can resolve up, down or
    stay the same, and in four parts, the two tonics always
    resolve differently, resulting in two equally
    acceptable resolutions. An alternate approach to resolving
    the It+6 chord was taken, such that an It+6
    chord could map internally to two different resolutions.
    Every other special resolution in fbRealizer
    consists of a 1:1 mapping of special chords to resolutions.

    Here, the It+6 chord is resolving to the dominant, minor tonic,
    and major tonic, respectively. In the
    dominant resolution shown, the tonics (D) are resolving inward,
    but they can resolve outward as well. In
    the minor tonic resolution, the higher tonic is resolving up to F,
    and the lower tonic remains the same.
    In the major tonic resolution, the higher tonic remains the same,
    while the lower tonic resolves up to the F#.

    >>> from music21.figuredBass import examples
    >>> from music21.figuredBass import rules
    >>> fbLine = examples.italianA6ResolutionExample()
    >>> fbRules = rules.Rules()
    >>> fbRules.upperPartsMaxSemitoneSeparation = None
    >>> fbRules.partMovementLimits.append([1, 4])
    >>> fbRealization = fbLine.realize(fbRules)
    >>> fbRealization.keyboardStyleOutput = False
    >>> #_DOCS_SHOW fbRealization.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_it+6.*
        :width: 700
    '''
    from music21 import converter
    from music21 import key
    # three It+6 chords resolving to the dominant, minor tonic 6,4,
    # and major tonic #6,4 respectively
    s = converter.parse(
        "tinynotation: D4 BB-4_#6,3 AA2_# D4 BB-4_#6,3 AA2_6,4 D4 BB-4_#6,3 AA2_#6,4",
        makeNotation=False)
    s.insert(0, key.Key('d'))
    return realizer.figuredBassFromStream(s)
def twelveBarBlues():
    '''
    This is a progression in Bb major based on the twelve bar blues. The progression used is:

        I  | IV  | I | I7
        IV | IV  | I | I7
        V7 | IV6 | I | I

    >>> from music21.figuredBass import examples
    >>> from music21.figuredBass import rules
    >>> bluesLine = examples.twelveBarBlues()
    >>> #_DOCS_SHOW bluesLine.generateBassLine().show()

    .. image:: images/figuredBass/fbExamples_bluesBassLine.*
        :width: 700

    >>> fbRules = rules.Rules()
    >>> fbRules.partMovementLimits = [(1, 4), (2, 12), (3, 12)]
    >>> fbRules.forbidVoiceOverlap = False
    >>> blRealization = bluesLine.realize(fbRules)
    >>> blRealization.getNumSolutions()
    2224978
    >>> #_DOCS_SHOW blRealization.generateRandomRealization().show()

    .. image:: images/figuredBass/fbExamples_twelveBarBlues.*
        :width: 700
    '''
    from music21 import converter
    from music21 import key
    # one whole note per bar; unfigured notes are root-position triads
    s = converter.parse(
        "tinynotation: BB-1 E-1 BB-1 BB-1_7 E-1 E-1 BB-1 BB-1_7 F1_7 G1_6 BB-1 BB-1",
        makeNotation=False)
    s.insert(0, key.Key('B-'))
    return realizer.figuredBassFromStream(s)
# -----------------------------------------------------------------
# METHODS FOR GENERATION OF BLUES VAMPS
def generateBoogieVamp(blRealization=None, numRepeats=5):
    '''
    Turns whole notes in twelve bar blues bass line to blues boogie woogie bass line. Takes
    in numRepeats, which is the number of times to repeat the bass line. Also, takes in a
    realization of :meth:`~music21.figuredBass.examples.twelveBarBlues`. If none is provided,
    a default realization with :attr:`~music21.figuredBass.rules.Rules.forbidVoiceOverlap`
    set to False and :attr:`~music21.figuredBass.rules.Rules.partMovementLimits` set to
    [(1, 4), (2, 12), (3, 12)] is used.

    >>> from music21.figuredBass import examples
    >>> #_DOCS_SHOW examples.generateBoogieVamp(numRepeats=1).show()

    .. image:: images/figuredBass/fbExamples_boogieVamp.*
        :width: 700
    '''
    from music21 import converter
    from music21 import stream
    from music21 import interval
    if blRealization is None:
        # build the default realization described in the docstring
        bluesLine = twelveBarBlues()
        fbRules = rules.Rules()
        fbRules.partMovementLimits = [(1, 4), (2, 12), (3, 12)]
        fbRules.forbidVoiceOverlap = False
        blRealization = bluesLine.realize(fbRules)

    sampleScore = blRealization.generateRandomRealizations(numRepeats)
    # one-bar boogie-woogie figure, transposed below to follow each bass note
    boogieBassLine = converter.parse("tinynotation: BB-8. D16 F8. G16 A-8. G16 F8. D16",
                                     makeNotation=False)

    newBassLine = stream.Part()
    newBassLine.append(sampleScore[1][0])  # Time signature
    newBassLine.append(sampleScore[1][1])  # Key signature

    for n in sampleScore[1].notes:
        # shift the boogie figure by the interval from its first note to
        # the current bass note, carrying over any lyrics (figures)
        i = interval.notesToInterval(boogieBassLine[0], n)
        tp = boogieBassLine.transpose(i)
        for lyr in n.lyrics:
            tp.notes.first().addLyric(lyr.text)

        for m in tp.notes:
            newBassLine.append(m)

    newScore = stream.Score()
    newScore.insert(0, sampleScore[0])
    newScore.insert(newBassLine)
    return newScore
def generateTripletBlues(blRealization=None, numRepeats=5):  # 12/8
    '''
    Turns whole notes in twelve bar blues bass line to triplet blues bass line. Takes
    in numRepeats, which is the number of times to repeat the bass line. Also, takes in a
    realization of :meth:`~music21.figuredBass.examples.twelveBarBlues`. If none is provided,
    a default realization with :attr:`~music21.figuredBass.rules.Rules.forbidVoiceOverlap`
    set to False and :attr:`~music21.figuredBass.rules.Rules.partMovementLimits` set to
    [(1, 4), (2, 12), (3, 12)] is used.

    >>> from music21.figuredBass import examples
    >>> #_DOCS_SHOW examples.generateTripletBlues(numRepeats=1).show()

    .. image:: images/figuredBass/fbExamples_tripletBlues.*
        :width: 700
    '''
    from music21 import converter
    from music21 import stream
    from music21 import interval
    from music21 import meter
    if blRealization is None:
        # build the default realization described in the docstring
        bluesLine = twelveBarBlues()
        fbRules = rules.Rules()
        fbRules.partMovementLimits = [(1, 4), (2, 12), (3, 12)]
        fbRules.forbidVoiceOverlap = False
        blRealization = bluesLine.realize(fbRules)

    sampleScore = blRealization.generateRandomRealizations(numRepeats)
    # one-bar triplet figure, transposed below to follow each bass note
    tripletBassLine = converter.parse("tinynotation: BB-4 BB-8 D4 D8 F4 F8 A-8 G8 F8",
                                      makeNotation=False)

    newBassLine = stream.Part()
    for n in sampleScore[1].notes:
        # shift the triplet figure by the interval from its first note to
        # the current bass note, carrying over any lyrics (figures)
        i = interval.notesToInterval(tripletBassLine[0], n)
        tp = tripletBassLine.transpose(i)
        for lyr in n.lyrics:
            tp.notes.first().addLyric(lyr.text)

        for m in tp.notes:
            newBassLine.append(m)

    newTopLine = stream.Part()
    for sampleChord in sampleScore[0].notes:
        # stretch each chord to fill a whole 12/8 bar (6 quarter lengths)
        sampleChordCopy = copy.deepcopy(sampleChord)
        sampleChordCopy.quarterLength = 6.0
        newTopLine.append(sampleChordCopy)

    newScore = stream.Score()
    newScore.append(meter.TimeSignature("12/8"))  # Time signature
    newScore.append(sampleScore[1][1])  # Key signature
    newScore.insert(0, newTopLine)
    newScore.insert(0, newBassLine)
    return newScore
# documentation ordering for the module's doc build
_DOC_ORDER = [exampleA, exampleB, exampleC, exampleD, V43ResolutionExample,
              viio65ResolutionExample,
              augmentedSixthResolutionExample, italianA6ResolutionExample, twelveBarBlues,
              generateBoogieVamp, generateTripletBlues]

# ------------------------------------------------------------------------------


class Test(unittest.TestCase):
    # placeholder -- the doctests in the functions above serve as the tests
    pass


if __name__ == '__main__':
    import music21
    music21.mainTest(Test)
| 2 | 2 |
mod_stats/tools/dlcount.py | openSUSE/mirrorbrain | 24 | 81412 | #!/usr/bin/python
# Copyright 2008,2009 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
#
#
# Analyze Apache logfiles in order to count downloads
#
#
# This script parses a MirrorBrain-enhanced access_log and does the following:
# - select lines on that the log analysis is supposed to run
# (StatsLogMask directive, which defaults to a regexp suitable for a MirrorBrain logfile)
# The expression also selects data from the log line, for example the
# country where a client request originated from.
# - a little ring buffer filters requests recurring within a sliding time
# window (keyed by ip+url+referer+user-agent
# length of the sliding window: StatsDupWindow
# - arbitrary log lines can be ignored by regexp (StatsIgnoreMask)
# - IP addresses can be ignored by string prefix match (StatsIgnoreIP)
# - apply prefiltering to the request (regular expressions with substitution)
# with one or more StatsPrefilter directives
# - parse the remaining request url into the values to be logged
# (StatsCount directive)
# - apply optional post-filtering to the parsed data (StatsPostfilter)
#
#
# The script should serve as model implementation for the Apache module which
# does the same in realtime.
#
#
# Usage:
# ./dlcount.py /var/log/apache2/download.services.openoffice.org/2009/11/download.services.openoffice.org-20091123-access_log.bz2 | sort -u
#
# Uncompressed, gzip or bzip2 compressed files are transparently opened.
#
#
# This script uses Python generators, which means that it doesn't allocate
# memory according to the log size. It rather works like a Unix pipe.
# (The implementation of the generator pipeline is based on David Beazley's
# PyCon UK 08 great talk about generator tricks for systems programmers.)
#
__version__ = '0.91'
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Peter poeml <<EMAIL>>'
__license__ = 'GPLv2'
__url__ = 'http://mirrorbrain.org/'


try:
    import sys
    import os
    import os.path
    import re
    import hashlib
    import time
    from datetime import datetime
    from optparse import OptionParser
    # probe for the builtin set type; absent before Python 2.4
    set
except NameError:
    from sets import Set as set  # Python 2.3 fallback

try:
    # probe for the builtin sorted(); absent before Python 2.4
    sorted
except NameError:
    def sorted(in_value):  # Python 2.3 fallback
        "A naive implementation of sorted"
        out_value = list(in_value)
        out_value.sort()
        return out_value
def gen_open(filenames):
    """Yield an open file object for each name in *filenames*.

    Files ending in .gz or .bz2 are opened through the matching
    decompressing reader; anything else is opened as a plain file.
    """
    import gzip
    import bz2
    openers = ((".gz", gzip.open), (".bz2", bz2.BZ2File))
    for name in filenames:
        for suffix, opener in openers:
            if name.endswith(suffix):
                yield opener(name)
                break
        else:
            yield open(name)
def gen_cat(sources):
    """Chain several iterables into one flat sequence of items."""
    for source in sources:
        for element in source:
            yield element
def gen_grep(pat, lines):
    """Yield only the lines that match the regular expression *pat*."""
    import re
    searcher = re.compile(pat).search
    for line in lines:
        if searcher(line):
            yield line
def gen_fragments(lines, pat):
    """Yield the match groups of *pat* for each line that matches it.

    *pat* is a compiled regular expression; non-matching lines are
    silently skipped.
    """
    for line in lines:
        match = pat.match(line)
        if match is None:
            continue
        yield match.groups()
class RingBuffer:
    """A simple fixed-size circular (FIFO) buffer.

    Each append drops the oldest element from the front and adds the new
    element at the end, so the buffer always holds exactly *size* items
    (initially all None).

    Example with size 4, inserting 0-9 one at a time::

        buf = RingBuffer(4)
        for i in range(10):
            buf.append(i)
        buf.get()   # -> [6, 7, 8, 9]

    Based on http://www.saltycrane.com/blog/2007/11/python-circular-buffer/
    """

    def __init__(self, size):
        # [None] * size replaces the original Python-2-only
        # "[None for i in xrange(size)]" with an equivalent expression
        # that also works on Python 3.
        self.data = [None] * size

    def append(self, x):
        # pop(0) is O(size); fine for the small windows used here
        # (StatsDupWindow, default 200)
        self.data.pop(0)
        self.data.append(x)

    def get(self):
        """Return the buffer contents, oldest first."""
        return self.data
def readconf(filename):
    """Parse a dlcount configuration file into a dict.

    Keys are the lower-cased directive names.  Each value is a list with
    one entry per occurrence of the directive, except 'statsdupwindow',
    which is a plain integer (default 200).  If no StatsLogMask is
    configured, a default regexp suitable for a MirrorBrain access_log
    is installed.  Exits the process with a message on an unparsable
    line or an unknown directive.  (A real Apache config parser would be
    needed for full fidelity.)
    """
    known_directives = ['StatsLogMask',
                        'StatsIgnoreMask',
                        'StatsIgnoreIP',
                        'StatsDupWindow',
                        'StatsPreFilter',
                        'StatsCount',
                        'StatsPostFilter']
    known_directives_lower = [i.lower() for i in known_directives]

    # regular expressions to parse directive arguments
    parse_1_in_quotes = re.compile(r'"(.*)"')
    parse_2_in_quotes = re.compile(r'"(.*)"\s+"(.*)"')

    # each config item is a list, because most directives may occur
    # more than once
    conf = {}
    for i in known_directives_lower:
        conf[i] = list()
    conf['statsdupwindow'] = 200

    for line in open(filename):
        line = line.strip()
        # skip comments and empty lines
        if line.startswith('#'):
            continue
        if not line:
            continue

        # split line into 1st word plus rest;
        # raises ValueError if there is no argument
        try:
            word, val = line.split(None, 1)
        except ValueError:
            sys.exit('error: can\'t parse the line %r' % line)

        if word.lower() not in known_directives_lower:
            sys.exit('unknown config directive: %r' % word)
        directive = word.lower()

        if directive in ['statsdupwindow']:
            # a single integer
            conf[directive] = int(val)
        elif directive in ['statslogmask', 'statsignoremask']:
            # one argument: a regexp (quoted, with escaped inner quotes)
            m = parse_1_in_quotes.match(val)
            regex = m.group(1).replace('\\"', '"')
            regex_compiled = re.compile(regex)
            conf[directive].append((regex_compiled, regex))
        elif directive in ['statsprefilter', 'statscount', 'statspostfilter']:
            # two arguments: a regexp and a substitution rule
            m = parse_2_in_quotes.match(val)
            regex = m.group(1).replace('\\"', '"')
            subst = m.group(2).replace('\\"', '"')
            regex_compiled = re.compile(regex)
            conf[directive].append((regex_compiled, subst, regex))
        elif directive in ['statsignoreip']:
            conf[directive].append(val)
        else:
            # unreachable while the branches above cover all known
            # directives; note that sys.exit() takes a single argument
            # (the original passed two, which would raise TypeError)
            sys.exit('unparsed directive (implementation needed): %r' % directive)

    # default log mask, suitable for a MirrorBrain access_log
    if not len(conf['statslogmask']):
        regex = r'^(\S+).+\[(.*?)\] "GET (\S*) HTTP.*" (200|302) [^"]+ "([^"]*)" "([^"]*)".* \w\w:(\w\w) ASN:'
        regex_compiled = re.compile(regex)
        conf['statslogmask'] = [(regex_compiled, regex)]

    return conf
# class Countable():
# """This holds a result from a parsed log line
# which consists of a date and 5 attributes"""
# #def __init__(self, date, a0, a1, a2, a3, a4):
# def __init__(self, (date, a0, a1, a2, a3, a4, a5)):
# self.date = date
# self.a0 = a0
# self.a1 = a1
# self.a2 = a2
# self.a3 = a3
# self.a4 = a4
# self.a5 = a5
class Req():
    """Mutable holder for one log line while it is being parsed.

    Instances are filled in step by step by gen_processreqs(); once a
    request has survived all filters, .countable is set to True and
    .tuple holds the (date, attributes..., country) tuple to count.
    """

    def __init__(self):
        # url_raw contains the original url, if needed
        self.url_raw = None

        self.tstamp = None       # parsed time.struct_time
        self.tstamp_raw = None   # raw timestamp string (timezone stripped)
        self.date = None         # datetime built from tstamp (year/month/day)
        self.status = None       # HTTP status from the log mask (string)
        self.referer = None
        self.ua = None           # user agent string
        self.country = None      # country code from the log, lower-cased

        # this is the processed URL, after running through all the regexps
        self.url = None

        self.countable = False   # True once fully parsed into .tuple

    def __str__(self):
        return '%-80s' % self.url

    def as_tuple(self):
        # .tuple is attached externally by gen_processreqs()
        return self.tuple

    # def as_obj(self):
    #     return Countable(self.tuple)
def gen_processreqs(reqs, conf, options):
    """Process tuples of request data and yield Req objects.

    Each incoming tuple comes from the StatsLogMask regexp.  Requests
    matching an ignore mask or an ignored IP prefix are dropped,
    duplicates within the StatsDupWindow sliding window are filtered
    out, and the remaining URLs are run through the prefilter / count /
    postfilter rules.  Requests that parsed completely are yielded with
    .countable set to True; unmatched ones are yielded uncounted.
    """
    # ring buffer of digests over the last StatsDupWindow requests
    known = RingBuffer(conf['statsdupwindow'])

    for req in reqs:
        rq = Req()

        # the country match group is optional in the log mask
        if len(req) == 7:
            (ip, tstamp_raw, url, status, referer, ua, country) = req
        elif len(req) == 6:
            (ip, tstamp_raw, url, status, referer, ua) = req
            country = ''

        skip = False
        for r, mreg in conf['statsignoremask']:
            if r.match(url):
                # print 'ignoring req %s because it matches %s' %(url, mreg)
                skip = True
                break
        if skip:
            continue

        # StatsIgnoreIP entries are plain string prefixes, not regexps
        for i in conf['statsignoreip']:
            if ip.startswith(i):
                # print 'ignoring ip %s because it matches %s' %(ip, i)
                skip = True
                break
        if skip:
            continue

        # over a window of StatsDupWindow last requests, the same request must
        # not have occured already. If it did, ignore it. If it didn't, put
        # it into the ring buffer.
        if conf['statsdupwindow'] > 0:
            # key the duplicate check on ip + url + referer + user-agent
            m = hashlib.md5()
            m.update(ip)
            m.update(url)
            m.update(referer)
            m.update(ua)
            md = m.digest()
            if md in known.data:
                continue
            known.append(md)

        rq.url_raw = url
        rq.status = status
        rq.referer = referer
        rq.ua = ua
        rq.country = country.lower()

        tstamp_raw = tstamp_raw.split()[0]  # split off timezone offset - we ignore it
        rq.tstamp = time.strptime(tstamp_raw, '%d/%b/%Y:%H:%M:%S')
        rq.tstamp_raw = tstamp_raw

        # apply the prefiltering rules
        for r, s, mreg in conf['statsprefilter']:
            url = r.sub(s, url)

        # apply the StatsCount rules; exactly one may match
        matched = False
        for r, s, mreg in conf['statscount']:
            if r.match(url):
                if matched:
                    # FIXME: eventually, we want to allow multiple matches. But now we are debugging.
                    sys.exit('warning: %r matches\n %r\nbut already matched a pevious regexp:\n %r' % (url, mreg, matched))
                url = r.sub(s, url)
                matched = mreg
        if not matched:
            if options.verbose:
                print 'not matched', url
            # yield the request anyway (with countable still False)
            yield rq
            continue

        # apply postfiltering
        for r, s, mreg in conf['statspostfilter']:
            url = r.sub(s, url)

        rq.url = url

        # would time.strftime("%Y-%m-%d", ...) be faster?
        rq.date = datetime(rq.tstamp[0], rq.tstamp[1], rq.tstamp[2])

        # the countable tuple: date, then the whitespace-separated parts
        # of the rewritten URL, then the country as last attribute
        rq.tuple = [rq.date]
        rq.tuple.extend(rq.url.split())
        # the country is our fifth attribute
        rq.tuple.append(rq.country)
        rq.tuple = tuple(rq.tuple)

        rq.countable = True
        # print rq
        yield rq
def main():
    """
    Create a generator pipeline for the matching log file lines
    and process them.

    Command line: CONFIGFILE LOGFILE [LOGFILE ...].  Counts unique request
    tuples in memory and optionally persists them to the Django-backed
    database when --db is given.

    NOTE: this module uses Python 2 syntax (print statements, iteritems).
    """
    usage = 'usage: %prog [options] CONFIGFILE LOGFILE [LOGFILE ...]'
    version = '%prog ' + __version__
    parser = OptionParser(usage=usage, version=version)
    # parser.disable_interspersed_args()
    parser.add_option('--db',
        action="store_true", dest="db", default=False,
        help="save counts to the database")
    parser.add_option('--db-home',
        help="specify directory where the database lives", metavar='DIR')
    parser.add_option("-q", "--quiet",
        action="store_true", dest="quiet", default=False,
        help="print only errors")
    parser.add_option("-v", "--verbose",
        action="store_true", dest="verbose", default=False,
        help="print debug messages to stderr")
    (options, args) = parser.parse_args()
    usage = usage.replace('%prog', os.path.basename(sys.argv[0]))
    if len(args) < 2:
        sys.exit(usage)
    conffile = args[0]
    filenames = args[1:]
    conf = readconf(conffile)
    # Build the lazy generator pipeline: file handles -> lines -> parsed
    # request fragments -> filtered/normalized request items.
    logfiles = gen_open(filenames)
    loglines = gen_cat(logfiles)
    reqs = gen_fragments(loglines, conf['statslogmask'][0][0])
    items = gen_processreqs(reqs, conf, options)
    if options.db and not options.db_home:
        sys.exit('--db-home is mandatory with --db.')
    if options.db:
        # Defer Django setup until we know the DB is wanted; the settings
        # module is resolved relative to --db-home's parent directory.
        dirpath = options.db_home
        #dirpath = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        dirpath = os.path.realpath(dirpath)
        os.chdir(dirpath)
        sys.path.insert(0, os.path.dirname(dirpath))
        os.environ['DJANGO_SETTINGS_MODULE'] = 'downloadstats.settings'
        from downloadstats.stats.models import Counter
        import downloadstats.settings
        if downloadstats.settings.DEBUG:
            from django import db
        # print 'you are runninng in DEBUG mode. This is not recommended, because\n' \
        #    'Django then saves a copy of every SQL statement it has executed.\n' \
        #    'I'm installing a cleanup handler that\'ll help.'
        # see below, in the loop
        # http://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
    start = time.time()
    counterdict = {}
    n = 0
    # Bind dict.get to a local for speed in the hot counting loop.
    get = counterdict.get
    for item in items:
        if not item.countable:
            continue
        t = item.as_tuple()
        n += 1
        counterdict[t] = get(t, 0) + 1
    delta = time.time() - start
    # NOTE(review): n counts all countable requests, not only unique ones;
    # the message wording may overstate uniqueness — confirm intent.
    print 'processed log in %.1f seconds, found %s valid and unique requests' % (delta, n)
    print '%s distinct countables' % len(counterdict)
    start = time.time()
    if options.db:
        # Upsert each aggregated tuple into the Counter table.
        for key, val in counterdict.iteritems():
            (date, a0, a1, a2, a3, a4) = key
            if downloadstats.settings.DEBUG:
                # Avoid Django's per-query memory growth in DEBUG mode.
                db.reset_queries()
            counter, created = Counter.objects.get_or_create(date=date,
                product=a0, osname=a1, version=a2, lang=a3,
                country=a4)
            if created:
                # count is 1 for a new item
                counter.count = val
            else:
                # item existed already - increase its counter
                counter.count += val
            counter.save()
        delta = time.time() - start
        print 'saved counts to db in %.1f seconds' % delta
    sys.exit(0)
# Script entry point.
if __name__ == '__main__':
    main()
| 1.851563 | 2 |
library/file_name_cleaner.py | abhips/bulk-filename-cleaner | 0 | 81540 | """
actual class which do the file name cleanup functions
:copyright: 2020 <NAME>
:license: The MIT License
"""
import os
import shutil
import pathlib
from .helper_enums import FileNameCase
from .script_utils import ScriptUtils
class FileNameCleaner(object):
    """Copy a directory tree, writing every file and directory under a
    cleaned-up name.

    Cleanup lowercases the name, removes configured filter words, replaces
    punctuation with the configured separator, normalizes repeated
    separators and applies the requested file-name case (directories are
    always Title Cased).
    """

    # Punctuation characters replaced by the configured separator.
    # (The original code performed ~24 chained .replace() calls, including
    # a duplicated ',' replacement; str.translate does it in one pass.)
    _SEPARATOR_CHARS = ',()[]{}`."\'+*~^=@#$%&!—'

    def __init__(self, input):
        # Words stripped from every name (populated externally; empty by default).
        self.__filter_words = []
        # Directories to skip during the copy (populated externally).
        self.__excluded_directories = []
        # User input object: source/target paths, case, prefix and separator.
        self.__input = input
        self.__utils = ScriptUtils()

    def __repr__(self):
        return ''

    def __str__(self):
        return ''

    def cleanup(self):
        """Walk the source tree and copy every file into the target tree
        under a cleaned-up name, creating cleaned-up directories as needed.

        Returns:
            True when the whole tree has been processed.
        """
        # Work lists of (source, target) directory pairs, processed in step.
        source_dir_paths = [self.__input.source_directory_path, ]
        target_dir_paths = [self.__input.target_directory_path, ]
        self.__utils.print_out("")
        while len(source_dir_paths) != 0:
            src_path = source_dir_paths.pop()
            trgt_path = target_dir_paths.pop()
            # The first os.walk() yield gives the direct children only.
            try:
                _, dir_names, file_names = next(os.walk(src_path))
            except StopIteration:
                # Directory vanished or is unreadable: nothing to process.
                dir_names, file_names = [], []
            # Queue sub-directories for later processing.
            for old_dir in dir_names:
                source_dir_path = os.path.join(src_path, old_dir)
                # Never descend into the target directory itself.
                if source_dir_path == self.__input.target_directory_path:
                    continue
                new_dir = self.cleanup_name(source_dir_path, old_dir, True)
                target_dir_path = os.path.join(trgt_path, new_dir)
                source_dir_paths.append(source_dir_path)
                target_dir_paths.append(target_dir_path)
                if not os.path.exists(target_dir_path):
                    self.__utils.print_out(
                        "creating directory '{}'".format(target_dir_path), '', 4)
                    os.makedirs(target_dir_path)
            # Copy the files of the current directory.
            for old_f in file_names:
                src_file = os.path.join(src_path, old_f)
                new_f = self.cleanup_name(src_file, old_f, False)
                self.__utils.print_out(
                    "'{}' ----> '{}'".format(src_file, os.path.join(trgt_path, new_f)), '', 4)
                self.__utils.print_out("", '', 4)
                try:
                    shutil.copy(src_file, os.path.join(trgt_path, new_f))
                except PermissionError as pe:
                    self.__utils.print_out(
                        'PermissionError - {} - {}'.format(pe, src_file), '#', 4)
                except FileNotFoundError as fne:
                    self.__utils.print_out(
                        'FileNotFoundError - {} - {}'.format(fne, src_file), '#', 4)
        return True

    def cleanup_name(self, old_full_path, old_name, is_directory):
        """Return the cleaned-up version of *old_name*.

        Args:
            old_full_path: full path of the entry (kept for API compatibility).
            old_name: bare file or directory name to clean.
            is_directory: directories are Title Cased and keep no extension.

        Returns:
            The cleaned name, with the original (lowercased) extension
            re-appended for files.
        """
        extension = ''
        root_name = old_name
        if not is_directory:
            # Fixed: use the splitext root directly.  The previous
            # root_name.replace(extension, '') removed *every* occurrence
            # of the extension substring, mangling names like 'a.txt.txt'.
            root_name, extension = os.path.splitext(old_name)
            extension = extension.lower()

        new_name = root_name.lower()
        # Filter out the configured words (case-insensitive).
        for word in self.__filter_words:
            new_name = new_name.replace(word.lower(), ' ')
        # Collapse runs of spaces introduced by the filtering.
        new_name = self.__utils.replace_multiple_character_occurances(
            new_name, ' ')

        # Apply the requested name case.
        if is_directory:
            new_name = new_name.title()
        else:
            if self.__input.file_name_case == FileNameCase.UPPER:
                new_name = new_name.upper()
            elif self.__input.file_name_case == FileNameCase.TITLE:
                new_name = new_name.title()
            else:
                # LOWER and any unrecognized value fall back to lower case.
                new_name = new_name.lower()
            # Optional user-supplied prefix (files only).
            if self.__input.file_name_prefix:
                new_name = self.__input.file_name_prefix + ' ' + new_name

        sep = self.__input.file_name_separator
        # Replace all punctuation with the separator in a single pass.
        new_name = new_name.translate(
            str.maketrans({ch: sep for ch in self._SEPARATOR_CHARS}))
        new_name = new_name.strip()
        # Unify space/underscore/hyphen to the chosen separator.
        if sep == '_':
            new_name = new_name.replace(' ', '_').replace('-', '_')
        elif sep == '-':
            new_name = new_name.replace(' ', '-').replace('_', '-')
        else:
            new_name = new_name.replace('-', ' ').replace('_', ' ')
        new_name = new_name.strip()
        # Collapse repeated separators.
        new_name = self.__utils.replace_multiple_character_occurances(
            new_name, sep)

        if not is_directory:
            new_name = new_name + extension
        return new_name
| 2.59375 | 3 |
qgreenland/util/config.py | faunalia/qgreenland | 15 | 81668 | """Provide helper functions for generating configuration.
ONLY the constants module should import this module.
"""
import copy
import csv
import functools
import os
from pathlib import Path
# HACK HACK HACK HACK HACK HACK HACK HACK HACK THIS IS A DUMB HACK HACK HACK
# Importing qgis before fiona is absolutely necessary to avoid segmentation
# faults. They have been occurring in unit tests. We still have no clue why.
import qgis.core as qgc # noqa: F401
import fiona # noqa: I100
# HACK HACK HACK HACK HACK HACK HACK HACK HACK THIS IS A DUMB HACK HACK HACK
import yamale
from humanize import naturalsize
import qgreenland.exceptions as exc
from qgreenland.constants import LOCALDATA_DIR
from qgreenland.util.misc import directory_size_bytes, get_layer_path
def _load_config(config_filename, *, config_dir, schema_dir):
    """Validate one config file against its Yamale schema and return its data.

    The schema for ``config_filename`` is expected to live in *schema_dir*
    under the same file name.  Yamale supports whole directories and
    therefore returns a list of ``(data, path)`` tuples; since exactly one
    file is read here, the data of the first tuple is returned.
    """
    config_path = os.path.join(config_dir, config_filename)
    if not os.path.isfile(config_path):
        raise NotImplementedError(
            'Loading is supported for only one config file at a time.'
        )

    schema = yamale.make_schema(os.path.join(schema_dir, config_filename))
    documents = yamale.make_data(config_path)
    yamale.validate(schema, documents, strict=True)

    # documents == [(data, path)]; return just the parsed data.
    data, _path = documents[0]
    return data
def _find_in_list_by_id(haystack, needle):
matches = [d for d in haystack if d['id'] == needle]
if len(matches) > 1:
raise LookupError(f'Found multiple matches in list with same id: {needle}')
if len(matches) != 1:
raise LookupError(f'Found no matches in list with id: {needle}')
return copy.deepcopy(matches[0])
def _deref_boundaries(cfg):
    """Dereference project boundaries, modifying `cfg`.

    Replace project boundary value (filename) with an object containing
    useful information about the boundary file: its resolved path, its
    features and its bounding box.

    Raises:
        exc.QgrInvalidConfigError: if a boundary file does not contain
            exactly one feature, or its CRS differs from the project CRS.
    """
    boundaries_config = cfg['project']['boundaries']
    for boundary_name, boundary_fn in boundaries_config.items():
        fp = os.path.join(LOCALDATA_DIR, boundary_fn)
        # Read the whole boundary file once; keep features/meta/bounds
        # after the file handle is closed.
        # NOTE(review): meta['crs']['init'] is the legacy fiona CRS
        # accessor — confirm against the pinned fiona version.
        with fiona.open(fp) as ifile:
            features = list(ifile)
            meta = ifile.meta
            bbox = ifile.bounds

        # A boundary must consist of exactly one feature.
        if (feature_count := len(features)) != 1:
            raise exc.QgrInvalidConfigError(
                f'Configured boundary {boundary_name} contains the wrong'
                f' number of features. Expected 1, got {feature_count}.'
            )

        # The boundary file must already be in the project CRS (no reprojection).
        if (boundary_crs := meta['crs']['init'].lower()) \
                != (project_crs := cfg['project']['crs'].lower()):
            raise exc.QgrInvalidConfigError(
                f'Expected CRS of boundary file {fp} ({boundary_crs}) to'
                f' match project CRS ({project_crs}).'
            )

        # Replace the filename with the dereferenced boundary object.
        boundaries_config[boundary_name] = {
            'fp': fp,
            'features': features,
            'bbox': bbox,
        }
def _deref_layers(cfg):
    """Dereferences layers in `cfg`, modifying `cfg`.

    Expects boundaries to already be dereferenced.
    """
    datasets = cfg['datasets']
    boundaries = cfg['project']['boundaries']

    for layer in cfg['layers']:
        # Attach the related dataset/source config unless already present.
        if 'dataset' not in layer:
            dataset_id, source_id = layer['data_source'].split('.')
            dataset = _find_in_list_by_id(datasets, dataset_id)
            layer['source'] = _find_in_list_by_id(dataset['sources'], source_id)
            layer['dataset'] = dataset
            # The chosen source is now attached; drop the full source list.
            del layer['dataset']['sources']

        # Layers without an explicit boundary use the background extent.
        boundary_name = layer.get('boundary', 'background')
        layer['boundary'] = boundaries[boundary_name]
def _dereference_config(cfg):
    """Take a full configuration object, replace references with the referent.

    - Datasets
    - Sources
    - Ingest Tasks
    """
    _deref_boundaries(cfg)
    _deref_layers(cfg)

    # Re-key layers by id.  Deep copies are required: YAML anchors (`&`) and
    # references (`*`) alias the same underlying objects, so without copying,
    # mutating one layer (e.g. popping from a list) would leak into every
    # other piece of config referencing the same data.
    # TODO: ensure all objects in the CONFIG are immutable.
    cfg['layers'] = {
        layer['id']: copy.deepcopy(layer)
        for layer in cfg['layers']
    }
    return cfg
@functools.lru_cache(maxsize=None)
def make_config(*, config_dir, schema_dir):
    """Load, validate and dereference all project config files (cached).

    Each section name maps to a '<section>.yml' file in *config_dir* with a
    matching schema in *schema_dir*.
    """
    # TODO: Avoid all this argument drilling without import cycles... this
    # shouldn't be so hard!
    # TODO: Consider namedtuple or something?
    sections = ('project', 'layers', 'layer_groups', 'datasets')
    cfg = {
        section: _load_config(f'{section}.yml',
                              config_dir=config_dir,
                              schema_dir=schema_dir)
        for section in sections
    }
    return _dereference_config(cfg)
def export_config(cfg, output_path='./layers.csv'):
    """Write a CSV report describing every layer in *cfg*.

    One row per layer: group/subgroup, title, description, data-source
    metadata and on-disk size.  Remote ('gdal_remote') layers are reported
    with size 0.
    """
    report = []
    for layer in cfg['layers'].values():
        if layer['dataset']['access_method'] != 'gdal_remote':
            layer_dir = Path(get_layer_path(layer)).parent
            layer_size_bytes = directory_size_bytes(layer_dir)
        else:
            # Online layers have no size on disk.
            layer_size_bytes = 0

        # 'group' is everything before the first '/', 'subgroup' the rest
        # (empty when there is no '/').
        group, _, subgroup = layer['group_path'].partition('/')
        metadata = layer['dataset']['metadata']
        report.append({
            'Group': group,
            'Subgroup': subgroup,
            'Layer Title': layer['title'],
            'Layer Description': layer.get('description', ''),
            'Vector or Raster': layer['data_type'],
            'Data Source Title': metadata['title'],
            'Data Source Abstract': metadata['abstract'],
            'Data Source Citation': metadata['citation']['text'],
            'Data Source Citation URL': metadata['citation']['url'],
            'Layer Size': naturalsize(layer_size_bytes),
            'Layer Size Bytes': layer_size_bytes,
        })

    # Fixed: report[0] raised IndexError when the config had no layers.
    if not report:
        print('No layers found; nothing exported.')
        return

    # Fixed: newline='' is required by the csv module to avoid spurious
    # blank lines on platforms that translate line endings.
    with open(output_path, 'w', newline='') as ofile:
        dict_writer = csv.DictWriter(ofile, report[0].keys())
        dict_writer.writeheader()
        dict_writer.writerows(report)

    print(f'Exported: {os.path.abspath(ofile.name)}')
| 1.5625 | 2 |
G2KD_Res/object/loss.py | tntek/G2KD | 0 | 81796 | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import torch.nn.functional as F
import pdb
def Entropy(input_):
    """Per-sample Shannon entropy of a batch of probability distributions.

    Args:
        input_: tensor of shape (batch, num_classes) holding probabilities
            (rows assumed to already be normalized, e.g. softmax outputs).

    Returns:
        1-D tensor of shape (batch,) with the entropy of each row.
    """
    # Fixed: removed the unused local `bs = input_.size(0)`.
    epsilon = 1e-5  # avoids log(0) for zero probabilities
    entropy = -input_ * torch.log(input_ + epsilon)
    return torch.sum(entropy, dim=1)
def grl_hook(coeff):
    """Build a backward hook implementing a gradient reversal layer.

    The returned hook negates the incoming gradient and scales it by
    *coeff*, leaving the original gradient tensor untouched (it clones).
    """
    def _reverse(grad):
        return -coeff * grad.clone()
    return _reverse
def CDAN(input_list, ad_net, entropy=None, coeff=None, random_layer=None):
    """Conditional adversarial domain-adaptation loss (CDAN-style).

    Args:
        input_list: [features, softmax_outputs] for the concatenated
            source+target batch (source first half, target second half —
            see the domain-label construction below).
        ad_net: domain discriminator producing probabilities in [0, 1].
        entropy: optional per-example entropy used to weight examples.
        coeff: gradient-reversal coefficient applied to the entropy path.
        random_layer: optional randomized multilinear map.

    Requires CUDA (domain targets are moved with .cuda()).
    """
    # Predictions are detached: the classifier is not trained through the
    # conditioning path, only through the feature path.
    softmax_output = input_list[1].detach()
    feature = input_list[0]
    if random_layer is None:
        # Multilinear conditioning: outer product of predictions and features.
        op_out = torch.bmm(softmax_output.unsqueeze(2), feature.unsqueeze(1))
        ad_out = ad_net(op_out.view(-1, softmax_output.size(1) * feature.size(1)))
    else:
        # Randomized multilinear map keeps the discriminator input small.
        random_out = random_layer.forward([feature, softmax_output])
        ad_out = ad_net(random_out.view(-1, random_out.size(1)))
    batch_size = softmax_output.size(0) // 2
    # Domain labels: 1 for the source half, 0 for the target half.
    dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
    if entropy is not None:
        # Reverse (and scale by coeff) gradients flowing through the
        # entropy during backprop.
        entropy.register_hook(grl_hook(coeff))
        # Entropy-aware example weights: low-entropy (confident) examples
        # get larger weight.
        entropy = 1.0+torch.exp(-entropy)
        # Split the weights by domain and normalize each half separately.
        source_mask = torch.ones_like(entropy)
        source_mask[feature.size(0)//2:] = 0
        source_weight = entropy*source_mask
        target_mask = torch.ones_like(entropy)
        target_mask[0:feature.size(0)//2] = 0
        target_weight = entropy*target_mask
        weight = source_weight / torch.sum(source_weight).detach().item() + \
                 target_weight / torch.sum(target_weight).detach().item()
        # Weighted BCE, renormalized by the total weight.
        return torch.sum(weight.view(-1, 1) * nn.BCELoss(reduction='none')(ad_out, dc_target)) / torch.sum(weight).detach().item()
    else:
        return nn.BCELoss()(ad_out, dc_target)
def DANN(features, ad_net):
    """Plain domain-adversarial (DANN) loss.

    The batch holds source examples in the first half and target examples
    in the second half (per the label construction below); `ad_net` is the
    domain discriminator trained to output 1 for source and 0 for target.
    Requires CUDA (targets are moved with .cuda()).
    """
    domain_pred = ad_net(features)
    half = domain_pred.size(0) // 2
    labels = [[1]] * half + [[0]] * half
    domain_target = torch.from_numpy(np.array(labels)).float().cuda()
    return nn.BCELoss()(domain_pred, domain_target)
class CrossEntropyLabelSmooth(nn.Module):
    """Cross entropy loss with label smoothing regularizer.

    Reference:
    Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
    Equation: y = (1 - epsilon) * y + epsilon / K.

    Args:
        num_classes (int): number of classes.
        epsilon (float): smoothing weight.
        use_gpu (bool): move the smoothed targets to CUDA.
        reduction (bool): if True, return the mean loss over the batch;
            otherwise return the per-sample loss vector.
    """

    def __init__(self, num_classes, epsilon=0.1, use_gpu=True, reduction=True):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.use_gpu = use_gpu
        self.reduction = reduction
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        """
        Args:
            inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
            targets: ground truth class indices with shape (batch_size,)
        """
        log_probs = self.logsoftmax(inputs)
        # One-hot encode the targets on CPU (scatter_ needs Long indices).
        targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1)
        if self.use_gpu:
            targets = targets.cuda()
        # Label smoothing: blend the one-hot targets with a uniform distribution.
        targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
        loss = (- targets * log_probs).sum(dim=1)
        if self.reduction:
            return loss.mean()
        # Fixed: removed an unreachable duplicate `return loss` that
        # followed the if/else (both branches already returned).
        return loss
class KnowledgeDistillationLoss(nn.Module):
    """Soft-target distillation loss.

    Matches the student's log-softmax against the teacher's (alpha-scaled)
    softmax; the per-sample score is averaged (not summed) over classes.

    Args:
        reduction: 'mean', 'sum', or anything else for per-sample losses.
        alpha: temperature-like scale applied to the teacher logits.
    """

    def __init__(self, reduction='mean', alpha=1.0):
        super().__init__()
        self.reduction = reduction
        self.alpha = alpha

    def forward(self, inputs, targets, mask=None):
        # Restrict student logits to the classes present in the teacher output
        # (the student may predict extra classes, e.g. in incremental setups).
        student = inputs.narrow(1, 0, targets.shape[1])
        log_student = torch.log_softmax(student, dim=1)
        soft_teacher = torch.softmax(targets * self.alpha, dim=1)

        per_sample = (log_student * soft_teacher).mean(dim=1)
        if mask is not None:
            per_sample = per_sample * mask.float()

        if self.reduction == 'mean':
            return -torch.mean(per_sample)
        if self.reduction == 'sum':
            return -torch.sum(per_sample)
        return -per_sample
class entropy_loss(nn.Module):
    """Mean Shannon entropy of the softmax distribution over logits."""

    def __init__(self):
        super().__init__()

    def forward(self, logits):
        """
        Args:
            logits: tensor of shape (batch, num_classes); may be empty.

        Returns:
            Scalar tensor: mean per-sample entropy (0 for an empty batch).
        """
        y_pred = F.softmax(logits, dim=-1)
        if logits.size(0) == 0:
            # Fixed: the original set loss = 0.0 (a float) and then called
            # torch.mean(loss), which raises a TypeError on an empty batch.
            return logits.new_tensor(0.0)
        # 1e-5 guards log(0) for saturated softmax outputs.
        loss = torch.sum(-y_pred * torch.log(y_pred + 1e-5), dim=1)
        return torch.mean(loss)
supvisors/tests/test_address.py | julien6387/supvisors | 66 | 81924 | <filename>supvisors/tests/test_address.py<gh_stars>10-100
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import pytest
import random
import time
from .base import database_copy
from .conftest import create_any_process, create_process
@pytest.fixture
def filled_node(supvisors):
    """ Create an AddressStatus and add all processes of the database. """
    from supvisors.address import AddressStatus
    status = AddressStatus('10.0.0.1', supvisors.logger)
    # feed the status with every process of the reference database copy,
    # registering each process's info under the same node name
    for info in database_copy():
        process = create_process(info, supvisors)
        process.add_info('10.0.0.1', info)
        status.add_process(process)
    return status
def test_create(supvisors):
    """ Test the values set at construction. """
    from supvisors.address import AddressStatus
    from supvisors.ttypes import AddressStates
    status = AddressStatus('10.0.0.1', supvisors.logger)
    # test all AddressStatus values: a fresh instance starts UNKNOWN,
    # with zeroed times and no known processes
    assert status.logger == supvisors.logger
    assert status.node_name == '10.0.0.1'
    assert status.state == AddressStates.UNKNOWN
    assert status.remote_time == 0
    assert status.local_time == 0
    assert status.processes == {}
def test_isolation(supvisors):
    """ Test the in_isolation method. """
    from supvisors.address import AddressStatus
    from supvisors.ttypes import AddressStates
    status = AddressStatus('10.0.0.1', supvisors.logger)
    # in_isolation must be True for exactly the two isolation states
    isolation_states = {AddressStates.ISOLATING, AddressStates.ISOLATED}
    for state in AddressStates:
        status._state = state
        assert status.in_isolation() == (state in isolation_states)
def test_serialization(supvisors):
    """ Test the serial method used to get a serializable form of AddressStatus. """
    import pickle
    from supvisors.address import AddressStatus
    from supvisors.ttypes import AddressStates
    # create address status instance in a known RUNNING state
    status = AddressStatus('10.0.0.1', supvisors.logger)
    status._state = AddressStates.RUNNING
    status.checked = True
    status.remote_time = 50
    status.local_time = 60
    # test to_json method: statecode 2 corresponds to RUNNING
    serialized = status.serial()
    assert serialized == {'address_name': '10.0.0.1', 'loading': 0, 'statecode': 2, 'statename': 'RUNNING',
                          'remote_time': 50, 'local_time': 60, 'sequence_counter': 0}
    # test that returned structure is serializable using pickle
    dumped = pickle.dumps(serialized)
    loaded = pickle.loads(dumped)
    assert serialized == loaded
def test_transitions(supvisors):
    """ Test the state transitions of AddressStatus. """
    from supvisors.address import AddressStatus
    from supvisors.ttypes import AddressStates, InvalidTransition
    status = AddressStatus('10.0.0.1', supvisors.logger)
    # check every (from, to) state pair against the _Transitions table
    for state1 in AddressStates:
        for state2 in AddressStates:
            # check all possible transitions from each state
            status._state = state1
            if state2 in status._Transitions[state1]:
                # allowed transition: the setter must accept it
                status.state = state2
                assert status.state == state2
                assert status.state.name == state2.name
            elif state1 == state2:
                # self-transition is a no-op, state is unchanged
                assert status.state == state1
            else:
                # any other transition must be rejected
                with pytest.raises(InvalidTransition):
                    status.state = state2
def test_add_process(supvisors):
    """ Test the add_process method. """
    from supvisors.address import AddressStatus
    status = AddressStatus('10.0.0.1', supvisors.logger)
    process = create_any_process(supvisors)
    status.add_process(process)
    # the process must be stored under its namespec, by reference
    stored = status.processes
    assert process.namespec in stored
    assert stored[process.namespec] is process
def test_times(filled_node):
    """ Test the update_times method. """
    from supervisor.states import ProcessStates
    # get current process times before the update, keyed by namespec
    ref_data = {process.namespec: (process.state, info['now'], info['uptime'])
                for process in filled_node.processes.values()
                for info in [process.info_map['10.0.0.1']]}
    # update times and check that counter and both clocks are stored
    now = int(time.time())
    filled_node.update_times(28, now + 10, now)
    assert filled_node.sequence_counter == 28
    assert filled_node.remote_time == now + 10
    assert filled_node.local_time == now
    # test process times: only RUNNING and STOPPING have a positive uptime
    new_data = {process.namespec: (process.state, info['now'], info['uptime'])
                for process in filled_node.processes.values()
                for info in [process.info_map['10.0.0.1']]}
    for namespec, new_info in new_data.items():
        ref_info = ref_data[namespec]
        # the state is unchanged, 'now' always moves forward
        assert new_info[0] == ref_info[0]
        assert new_info[1] > ref_info[1]
        if new_info[0] in [ProcessStates.RUNNING, ProcessStates.STOPPING]:
            # live processes see their uptime increase
            assert new_info[2] > ref_info[2]
        else:
            # stopped processes keep their last uptime
            assert new_info[2] == ref_info[2]
def test_running_process(filled_node):
    """ Test the running_process method. """
    # only these four processes of the database fixture are running
    running_names = {proc.process_name for proc in filled_node.running_processes()}
    assert running_names == {'late_segv', 'segv', 'xfontsel', 'yeux_01'}
def test_pid_process(filled_node):
    """ Test the pid_process method. """
    # check the namespec and pid of the running processes
    expected = {('sample_test_1:xfontsel', 80879), ('sample_test_2:yeux_01', 80882)}
    assert set(filled_node.pid_processes()) == expected
def test_get_loading(filled_node):
    """ Test the get_loading method. """
    # check the loading of the address: gives 0 by default because no rule has been loaded
    assert filled_node.get_loading() == 0
    # change expected_loading of any stopped process: it must not count
    process = random.choice([proc for proc in filled_node.processes.values() if proc.stopped()])
    process.rules.expected_load = 50
    assert filled_node.get_loading() == 0
    # change expected_loading of any running process: only running
    # processes contribute to the node loading
    process = random.choice([proc for proc in filled_node.processes.values() if proc.running()])
    process.rules.expected_load = 50
    assert filled_node.get_loading() == 50
| 1.421875 | 1 |
apip/layers.py | parshakova/APIP2 | 2 | 82052 | <reponame>parshakova/APIP2
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import math, gc
import string, re
import logging
import argparse
from shutil import copyfile
from datetime import datetime
from collections import Counter
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.autograd import Variable
from torch.nn.utils.weight_norm import weight_norm
# Origin: https://github.com/facebookresearch/ParlAI/tree/master/parlai/agents/drqa
# ------------------------------------------------------------------------------
# Modification:
# - most classes are either modified or newly created
# - most functions are newly created
# ------------------------------------------------------------------------------
import cuda_functional as MF
class StackedBRNN(nn.Module):
    """Stack of bidirectional RNN layers (SRU / cuDNN RNN / custom cells).

    `rnn_stype` selects the implementation: 'sru' (default), 'lstm', 'gru',
    'rnn', or a custom-cell variant whose name contains 'cell'.
    `func` is encoded as "<start><end>_<name>": the inclusive layer range
    that receives the per-action transform, plus the transform name.
    """

    def __init__(self, input_size, hidden_size, num_layers,
                 dropout_rate=0, dropout_output=False, rnn_stype='sru',
                 concat_layers=False, padding=False, bidirectional=True, n_actions=0,func=''):
        super(StackedBRNN, self).__init__()
        self.padding = padding
        self.dropout_output = dropout_output
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        self.concat_layers = concat_layers
        self.rnns = nn.ModuleList()
        RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN, 'sru':0}
        self.rnn_type = rnn_stype
        self.n_actions = n_actions
        if func:
            # func format: digit start layer, digit end layer, '_' separator,
            # then the transform name (e.g. "01_mul_s")
            start_l, end_l, func = int(func[0]), int(func[1]), func[3:]
        else:
            # empty func: choose a range that matches no layer index
            start_l = num_layers*2; end_l = 0
        for i in range(num_layers):
            # layers after the first consume the 2*hidden bidirectional output
            input_size = input_size if i == 0 else 2 * hidden_size
            if 'cell' in rnn_stype:
                if 'gru' in rnn_stype:
                    self.rnns.append(CustomGRU(input_size, hidden_size,n_actions=n_actions if (i>=start_l and i<=end_l) else 0, func=func))
                elif 'lstm' in rnn_stype:
                    self.rnns.append(CustomLSTM(input_size, hidden_size))
            else:
                if self.rnn_type == 'sru':
                    # project SRU cell; actions only enabled for layers in [start_l, end_l]
                    self.rnns.append(MF.SRUCell(input_size, hidden_size, dropout=dropout_rate, rnn_dropout=dropout_rate, use_tanh=1,\
                        n_actions=n_actions if (i>=start_l and i<=end_l) else 0, func=func, bidirectional=bidirectional))
                else:
                    self.rnns.append(RNN_TYPES[rnn_stype](input_size, hidden_size,
                                                          num_layers=1, bidirectional=bidirectional))

    def forward(self, x, x_mask=None, c0=None, actions=None, use_a=True):
        """Faster encoding that ignores any padding."""
        # Transpose batch and sequence dims (batch-first -> time-first)
        x = x.transpose(0, 1)
        # Encode all layers; outputs[0] is the input itself
        outputs = [x]
        for i in range(self.num_layers):
            rnn_input = outputs[-1]
            # Apply dropout to hidden input (SRU handles its own dropout)
            if self.rnn_type != 'sru':
                if self.dropout_rate > 0:
                    rnn_input = F.dropout(rnn_input,
                                          p=self.dropout_rate,
                                          training=self.training)
            # Forward: only the first SRU layer receives the initial state c0
            if i == 0 and self.rnn_type=='sru':
                rnn_output = self.rnns[i](rnn_input, c0=c0, actions=actions, use_a=use_a)[0]
            else:
                if self.n_actions>0:
                    rnn_output = self.rnns[i](rnn_input, actions=actions, use_a=use_a)[0]
                else:
                    rnn_output = self.rnns[i](rnn_input)[0]
            outputs.append(rnn_output)
        # Concat hidden layers across the feature dimension, or take the last
        if self.concat_layers:
            output = torch.cat(outputs[1:], 2)
        else:
            output = outputs[-1]
        # Transpose back to batch-first
        output = output.transpose(0, 1)
        # Dropout on output layer
        if self.dropout_output and self.dropout_rate > 0:
            output = F.dropout(output,
                               p=self.dropout_rate,
                               training=self.training)
        return output.contiguous()

    def _forward_padded(self, x, x_mask):
        """Slower (significantly), but more precise,
        encoding that handles padding."""
        # Compute sorted sequence lengths (x_mask: 1 marks padding)
        lengths = x_mask.data.eq(0).long().sum(1)
        _, idx_sort = torch.sort(lengths, dim=0, descending=True)
        _, idx_unsort = torch.sort(idx_sort, dim=0)
        lengths = list(lengths[idx_sort])
        idx_sort = Variable(idx_sort)
        idx_unsort = Variable(idx_unsort)
        # Sort x by decreasing length (required by pack_padded_sequence)
        x = x.index_select(0, idx_sort)
        # Transpose batch and sequence dims
        x = x.transpose(0, 1)
        # Pack it up
        rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths)
        # Encode all layers
        outputs = [rnn_input]
        for i in range(self.num_layers):
            rnn_input = outputs[-1]
            # Apply dropout to input (on the packed data tensor)
            if self.dropout_rate > 0:
                dropout_input = F.dropout(rnn_input.data,
                                          p=self.dropout_rate,
                                          training=self.training)
                rnn_input = nn.utils.rnn.PackedSequence(dropout_input,
                                                        rnn_input.batch_sizes)
            outputs.append(self.rnns[i](rnn_input)[0])
        # Unpack everything
        for i, o in enumerate(outputs[1:], 1):
            outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0]
        # Concat hidden layers or take final
        if self.concat_layers:
            output = torch.cat(outputs[1:], 2)
        else:
            output = outputs[-1]
        # Transpose and unsort back to the original batch order
        output = output.transpose(0, 1)
        output = output.index_select(0, idx_unsort)
        # Dropout on output layer
        if self.dropout_output and self.dropout_rate > 0:
            output = F.dropout(output,
                               p=self.dropout_rate,
                               training=self.training)
        return output
class CustomGRU(nn.Module):
    """Bidirectional GRU assembled from ``nn.GRUCell``s.

    When ``n_actions > 0`` (and the transform is not 'g_hc'), the input
    sequence is first passed through a per-action linear transform — one
    weight matrix per discrete action, selected by the ``actions`` tensor —
    before being fed to the recurrent cells.  Requires CUDA (the initial
    hidden states are created with .cuda()).
    """

    def __init__(self, input_size, hidden_size, n_actions=0, func=""):
        super(CustomGRU, self).__init__()
        RNN_CELLTYPES = {'lstm': nn.LSTMCell, 'gru': nn.GRUCell, 'rnn': nn.RNNCell}
        self.rnn_cf = RNN_CELLTYPES['gru'](input_size, hidden_size)  # forward direction
        self.rnn_cb = RNN_CELLTYPES['gru'](input_size, hidden_size)  # backward direction
        self.hidden_size = hidden_size
        self.n_actions = n_actions
        self.n_func = func
        self.n_in = input_size
        if self.n_actions > 0:
            if func == 'g_hc':
                # Per-action gating weights on the hidden state (2 gate sets).
                self.wa_f = nn.Parameter(torch.Tensor(2, self.n_actions,
                                                      hidden_size, hidden_size))
                self.wa_b = nn.Parameter(torch.Tensor(2, self.n_actions,
                                                      hidden_size, hidden_size))
                self.func = lambda a, w: torch.mul(a, F.sigmoid(torch.mm(a, w)))
            else:
                # Per-action input transforms (n_in x n_in per action).
                self.wa_f = nn.Parameter(torch.Tensor(self.n_actions,
                                                      self.n_in, self.n_in))
                self.wa_b = nn.Parameter(torch.Tensor(self.n_actions,
                                                      self.n_in, self.n_in))
                if func == 'mul_s':
                    self.func = lambda a: F.sigmoid(a)
                # NOTE(review): for any other func value self.func is never
                # assigned and forward() would raise AttributeError —
                # confirm which func values callers actually use.
            self.init_weight()

    def init_weight(self):
        """Uniform init of the per-action weights, scaled by the input size."""
        val_range = (3.0/self.n_in)**0.5
        self.wa_f.data.uniform_(-val_range, val_range)
        self.wa_b.data.uniform_(-val_range, val_range)

    def forward(self, inpt, actions=None):
        """Run both directions over `inpt` (seq_len, batch, input_size).

        Returns:
            [output] where output is (seq_len, batch, 2*hidden_size), the
            concatenation of forward and backward hidden states.
        """
        out_f, out_b = [], []
        seqlen = inpt.size(0)
        if self.n_actions>0 and self.n_func != 'g_hc':
            batch = inpt.size(1)
            length = inpt.size(0)
            # One-hot action selector per sample.
            # NOTE(review): `one_hot` is assumed to be defined elsewhere in
            # this module — confirm.
            a_oh = one_hot(actions, self.n_actions).unsqueeze(2) # [batch x n_actions x 1]
            u_f, u_b = [], []
            # Fixed: `n_in` was an undefined name here (NameError on any run
            # with n_actions > 0); use self.n_in.
            xt_2d = inpt.transpose(0,1).contiguous().view(batch*length, self.n_in)
            for a in range(self.n_actions):
                w_if = self.func(self.wa_f[a])
                u_f.append(xt_2d.mm(w_if).view(batch, -1))
                w_ib = self.func(self.wa_b[a])
                u_b.append(xt_2d.mm(w_ib).view(batch, -1))
            # Select each sample's transformed input by its action.
            uf = torch.stack(u_f, 1) # [batch x actions x len*in]
            uf = torch.mul(uf, a_oh).sum(1).view(batch, length, -1)
            # Fixed: reshape to (len, batch, dim) so inpt_f[i] is a batch of
            # rows for GRUCell; the previous 2-D view yielded 1-D rows.
            # (This path was dead before, due to the NameError above.)
            inpt_f = uf.transpose(0,1).contiguous().view(length, batch, self.wa_f.size(2))
            ub = torch.stack(u_b, 1) # [batch x actions x len*in]
            ub = torch.mul(ub, a_oh).sum(1).view(batch, length, -1)
            inpt_b = ub.transpose(0,1).contiguous().view(length, batch, self.wa_f.size(2))
        else:
            inpt_f, inpt_b = inpt, inpt
        # Forward pass.
        hx = Variable(torch.zeros(inpt.size(1), self.hidden_size), requires_grad=False).cuda()
        for i in range(seqlen):
            hx = self.rnn_cf(inpt_f[i], hx)
            out_f.append(hx)
        # Backward pass.
        hx = Variable(torch.zeros(inpt.size(1), self.hidden_size), requires_grad=False).cuda()
        for i in reversed(range(seqlen)):
            hx = self.rnn_cb(inpt_b[i], hx)
            out_b.append(hx)
        # NOTE(review): out_b is collected last-timestep-first and stacked
        # without re-reversing, so the backward states are time-reversed
        # relative to out_f in the concatenation — confirm this is intended
        # (kept as-is to preserve trained-model behavior).
        out_f = torch.stack(out_f, 0)
        out_b = torch.stack(out_b, 0)
        return [torch.cat([out_f, out_b], 2)]
class CustomLSTM(nn.Module):
    """Bidirectional LSTM assembled from ``nn.LSTMCell``s.

    Requires CUDA (initial hidden/cell states are created with .cuda()).
    """

    def __init__(self, input_size, hidden_size):
        super(CustomLSTM, self).__init__()
        RNN_CELLTYPES = {'lstm': nn.LSTMCell, 'gru': nn.GRUCell, 'rnn': nn.RNNCell}
        self.rnn_cf = RNN_CELLTYPES['lstm'](input_size, hidden_size)  # forward direction
        self.rnn_cb = RNN_CELLTYPES['lstm'](input_size, hidden_size)  # backward direction
        self.hidden_size = hidden_size

    def forward(self, inpt):
        """Run both directions over `inpt` (seq_len, batch, input_size).

        Returns:
            [output] where output is (seq_len, batch, 2*hidden_size).
        """
        out_f, out_b = [], []
        seqlen = inpt.size(0)
        # Forward pass with zeroed initial hidden/cell states.
        hx = Variable(torch.zeros(inpt.size(1), self.hidden_size), requires_grad=False).cuda()
        cx = Variable(torch.zeros(inpt.size(1), self.hidden_size), requires_grad=False).cuda()
        for i in range(seqlen):
            hx, cx = self.rnn_cf(inpt[i], (hx, cx))
            out_f.append(hx)
        # Backward pass.
        hx = Variable(torch.zeros(inpt.size(1), self.hidden_size), requires_grad=False).cuda()
        cx = Variable(torch.zeros(inpt.size(1), self.hidden_size), requires_grad=False).cuda()
        for i in reversed(range(seqlen)):
            hx, cx = self.rnn_cb(inpt[i], (hx, cx))
            out_b.append(hx)
        # NOTE(review): out_b is collected last-timestep-first and stacked
        # without re-reversing, so the backward states are time-reversed
        # relative to out_f in the concatenation — confirm this is intended.
        out_f = torch.stack(out_f, 0)
        out_b = torch.stack(out_b, 0)
        return [torch.cat([out_f, out_b], 2)]
class MatchBRNN(nn.Module):
    """Stacked bidirectional match encoder over SRU cells.

    At every layer, each time step of the layer input is re-attended against
    `memory` (the raw input x); the attention pool is concatenated to the
    layer input and fed through a forward and a backward SRU whose outputs
    are concatenated feature-wise.
    """
    def __init__(self, input_size, hidden_size, num_layers=1, n_actions=0,
                 dropout_rate=0, dropout_output=False, rnn_type=nn.LSTM,attn='act',
                 concat_layers=False, padding=False, bidirectional=False):
        super(MatchBRNN, self).__init__()
        self.padding = padding
        self.dropout_output = dropout_output
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        self.concat_layers = concat_layers
        self.rnns_f, self.rnns_b = nn.ModuleList(), nn.ModuleList()
        if attn == 'act':
            self.nonlin = lambda x: F.softmax(x, dim=-1)
            self.attention = SeqAttentionAction(2*hidden_size, 2*hidden_size, n_actions)
        else:
            # NOTE(review): `self.training` is evaluated HERE, at construction
            # time, when modules are always in training mode — so the softmax
            # branch is chosen unconditionally and eval() never switches to
            # the identity. If a call-time switch was intended, this check
            # belongs in forward(); confirm intent before changing, since
            # eval outputs would differ.
            if self.training:
                self.nonlin = lambda x: F.softmax(x, dim=-1)
            else:
                self.nonlin = lambda x: x
            self.attention = SeqAttention(2*hidden_size, 2*hidden_size)
        for i in range(num_layers):
            # Layers after the first consume the 2*hidden bidirectional output.
            input_size = input_size if i == 0 else 2*hidden_size
            self.rnns_f.append(MF.SRUCell(input_size, hidden_size,
                                dropout=dropout_rate,
                                rnn_dropout=dropout_rate,
                                use_tanh=1,
                                bidirectional=False))
            self.rnns_b.append(MF.SRUCell(input_size, hidden_size,
                                dropout=dropout_rate,
                                rnn_dropout=dropout_rate,
                                use_tanh=1,
                                bidirectional=False))
    def forward(self, x, x_mask, actions=None):
        """Faster encoding that ignores any padding.

        x:       batch * len * input_size
        x_mask:  batch * len (nonzero marks padding)
        actions: optional per-sample action indices for the action-conditioned
                 attention variant.
        Returns batch * len * (2*hidden_size, times num_layers if
        concat_layers), contiguous.
        """
        # Transpose batch and sequence dims
        memory = x
        # Encode all layers
        outputs = [x.transpose(0, 1)]
        for i in range(self.num_layers):
            rnn_input = outputs[-1]
            attn_pools_f, attn_pools_b = [0]*rnn_input.size(0), [0]*rnn_input.size(0)
            # NOTE(review): storing `.data` below detaches the attention pools
            # from the autograd graph, so no gradient flows through this
            # attention — presumably a deliberate memory saving; confirm.
            # Also note the backward loop computes the exact same per-index
            # values as the forward loop (the attention depends only on c),
            # so attn_pools_b ends up identical to attn_pools_f.
            for c in range(rnn_input.size(0)):
                att = self.nonlin(self.attention(memory, rnn_input[c], x_mask, actions))
                attn_pool = torch.mul(memory, att.unsqueeze(2)).sum(1)
                attn_pools_f[c] = attn_pool.data
                del att, attn_pool
            for c in reversed(range(rnn_input.size(0))):
                att = self.nonlin(self.attention(memory, rnn_input[c], x_mask, actions))
                attn_pool = torch.mul(memory, att.unsqueeze(2)).sum(1)
                attn_pools_b[c] = attn_pool.data
                del att, attn_pool
            inputs_f = torch.cat([rnn_input, Variable(torch.stack(attn_pools_f, 0))], 2)
            inputs_b = torch.cat([rnn_input, Variable(torch.stack(attn_pools_b, 0))], 2)
            rnn_output_f = self.rnns_f[i](inputs_f)[0]
            rnn_output_b = self.rnns_b[i](inputs_b)[0]
            del inputs_f, inputs_b
            rnn_output = torch.cat([rnn_output_f, rnn_output_b], 2)
            outputs.append(rnn_output)
        # Concat hidden layers
        if self.concat_layers:
            output = torch.cat(outputs[1:], 2)
        else:
            output = outputs[-1]
        # Transpose back
        output = output.transpose(0, 1)
        # Dropout on output layer
        if self.dropout_output and self.dropout_rate > 0:
            output = F.dropout(output,
                               p=self.dropout_rate,
                               training=self.training)
        return output.contiguous()
class SeqAttnMatch(nn.Module):
    """Given sequences X and Y, match sequence Y to each element in X.
    * o_i = sum(alpha_j * y_j) for i in X
    * alpha_j = softmax(y_j * x_i)
    """
    def __init__(self, input_size, wn=False):
        super(SeqAttnMatch, self).__init__()
        self.linear = nn.Linear(input_size, input_size)
        if wn:
            self.linear = weight_norm(self.linear, dim=None)

    def _project(self, seq):
        # Shared ReLU projection applied to either sequence.
        flat = seq.view(-1, seq.size(2))
        return F.relu(self.linear(flat)).view(seq.size())

    def forward(self, x, y, y_mask):
        """Input shapes:
            x = batch * len1 * h
            y = batch * len2 * h
            y_mask = batch * len2
        Output shapes:
            matched_seq = batch * len1 * h
        """
        # Project both sequences through the shared linear+ReLU.
        if self.linear:
            x_proj = self._project(x)
            y_proj = self._project(y)
        else:
            x_proj = x
            y_proj = y
        # Pairwise similarity scores, then mask out padded y positions.
        scores = x_proj.bmm(y_proj.transpose(2, 1))
        expanded_mask = y_mask.unsqueeze(1).expand(scores.size())
        scores.data.masked_fill_(expanded_mask.data, -float('inf'))
        # Row-wise softmax over len2, then take the weighted average of y.
        alpha = F.softmax(scores.view(-1, y.size(1)), dim=-1)
        alpha = alpha.view(-1, x.size(1), y.size(1))
        return alpha.bmm(y)
class BilinearSeqAexist(nn.Module):
    """Answer-existence scorer.

    Self-attends over X with weights derived from y, pools X with those
    weights, and maps the pooled vector to a single logit plus its sigmoid
    probability.
    """
    def __init__(self, x_size, wn=False):
        super(BilinearSeqAexist, self).__init__()
        self.w1 = nn.Linear(x_size, x_size)
        self.w2 = nn.Linear(x_size, 1)

    def forward(self, x, y):
        """
        x = batch * len * h1
        y = batch * h2
        Returns (logits, probs), each batch * 1.
        """
        proj = self.w1(y)                                  # batch * h
        attn_scores = x.bmm(proj.unsqueeze(2)).squeeze(2)  # batch * len
        weights = F.softmax(attn_scores, dim=-1).unsqueeze(2)
        pooled = (x * weights).sum(1)                      # batch * h
        logits = self.w2(F.relu(pooled))
        probs = F.sigmoid(logits)
        return logits, probs
class BilinearSeqAttn(nn.Module):
    """A bilinear attention layer over a sequence X w.r.t y:
    * o_i = softmax(x_i'Wy) for x_i in X.
    Optionally don't normalize output weights.
    """
    def __init__(self, x_size, y_size, identity=False, wn=False):
        super(BilinearSeqAttn, self).__init__()
        if identity:
            self.linear = None
        else:
            lin = nn.Linear(y_size, x_size)
            self.linear = weight_norm(lin, dim=None) if wn else lin

    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len
        (`actions` is accepted for interface parity and unused here.)
        """
        Wy = y if self.linear is None else self.linear(y)
        scores = x.bmm(Wy.unsqueeze(2)).squeeze(2)
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        # Log-probabilities for NLL during training, plain probabilities at eval.
        normalize = F.log_softmax if self.training else F.softmax
        return normalize(scores, dim=-1)
class BilinearSeqAttnMix(nn.Module):
    """Bilinear attention over X w.r.t y1 where both sides are first
    augmented with co-attended summaries from MixingFeatures:
    * o_i = softmax((x_i + s_d_i)' W(y1 + s_q)) for x_i in X.
    Outputs log-softmax in training (for NLL), softmax at eval.
    """
    def __init__(self, x_size, y_size, identity=False, wn=False):
        super(BilinearSeqAttnMix, self).__init__()
        if identity:
            self.linear = None
        else:
            lin = nn.Linear(y_size, x_size)
            self.linear = weight_norm(lin, dim=None) if wn else lin
        self.mixing = MixingFeatures(x_size, y_size, final=True)

    def forward(self, x, y, y1, x_mask, y_mask):
        """
        x = batch * len * h1 (document sequence)
        y = batch * qlen * h (question sequence, fed to the mixer)
        y1 = batch * h2 (question summary vector)
        x_mask = batch * len
        y_mask = batch * qlen
        """
        # Co-attended augmentations for the document (s_d) and question (s_q).
        s_d, s_q = self.mixing(x, x_mask, y, y_mask)[:2]
        Wy = y1 if self.linear is None else self.linear(y1)
        scores = (x + s_d).bmm((Wy + s_q).unsqueeze(2)).squeeze(2)
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        normalize = F.log_softmax if self.training else F.softmax
        return normalize(scores, dim=-1)
class CapsuleLayer(nn.Module):
    """Capsule layer with dynamic routing.

    Two modes:
      * num_route_nodes != -1: routed capsules — a learned per-(output
        capsule, input node) transform produces prediction vectors
        ("priors"), and `num_iterations` rounds of routing-by-agreement
        combine them into the output capsules.
      * num_route_nodes == -1: primary capsules — a bank of Conv1d heads
        whose flattened outputs are concatenated and squashed.
    """
    def __init__(self, num_capsules, num_route_nodes, in_channels, out_channels, kernel_size=None, stride=None,
                 num_iterations=3):
        super(CapsuleLayer, self).__init__()
        self.num_route_nodes = num_route_nodes
        self.num_iterations = num_iterations
        self.num_capsules = num_capsules
        if num_route_nodes != -1:
            # One [in_channels x out_channels] transform per
            # (output capsule, input route node) pair.
            self.route_weights = nn.Parameter(torch.randn(num_capsules, num_route_nodes, in_channels, out_channels))
        else:
            self.capsules = nn.ModuleList(
                [nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=0) for _ in
                 range(num_capsules)])
    def squash(self, tensor, dim=-1):
        """Capsule non-linearity: shrinks short vectors toward zero and caps
        long vectors just below unit norm, preserving direction."""
        squared_norm = (tensor ** 2).sum(dim=dim, keepdim=True)
        scale = squared_norm / (1 + squared_norm)
        return scale * tensor / torch.sqrt(squared_norm)
    def forward(self, x):
        if self.num_route_nodes != -1:
            # x: [batch * w.h.out_chan(l-1) * num_caps_(l-1)]
            # Broadcasted matmul: per-(capsule, sample, node) predictions.
            priors = x[None, :, :, None, :] @ self.route_weights[:, None, :, :, :]
            logits = Variable(torch.zeros(*priors.size())).cuda()
            for i in range(self.num_iterations):
                # `softmax` is the module-level helper imported earlier in
                # this file (routing coefficients over dim 2).
                probs = softmax(logits, dim=2)
                outputs = self.squash((probs * priors).sum(dim=2, keepdim=True))
                if i != self.num_iterations - 1:
                    # Agreement between prediction and output raises the logit.
                    delta_logits = (priors * outputs).sum(dim=-1, keepdim=True)
                    logits = logits + delta_logits
        else:
            outputs = [capsule(x).view(x.size(0), -1, 1) for capsule in self.capsules]
            outputs = torch.cat(outputs, dim=-1)
            outputs = self.squash(outputs)
        return outputs
class CapsNetFin(nn.Module):
    """Capsule-network span-prediction head.

    Builds n_actions gated feature vectors from the document boundary
    states and the question vector, squashes them into capsules, and routes
    them through two CapsuleLayers whose per-position capsule lengths give
    the start / end distributions.  (The original docstring here was a
    copy-paste from the bilinear attention layers.)
    """
    def __init__(self, x_size, y_size, n_actions):
        super(CapsNetFin, self).__init__()
        # len = 767 //400
        self.conv = nn.ModuleList()
        hid_cnn = 64
        self.cnn_layers = 4
        # Positions are treated as classes: one output capsule per position,
        # up to max_len.
        self.max_len = num_classes = 400 #767
        self.linear1 = nn.ModuleList()
        self.linear2 = nn.ModuleList()
        self.v = nn.ParameterList()
        self.n_actions = n_actions
        for a in range(n_actions):
            # Per-action projections of the question (linear1) and the
            # document boundary features (linear2), gated by vector v.
            self.linear1.append(nn.Linear(y_size, x_size//3))
            self.linear2.append(nn.Linear(x_size, x_size//3))
            self.v.append(nn.Parameter(torch.Tensor(x_size//3)))
        self.digit_capsules1 = CapsuleLayer(num_capsules=num_classes, num_route_nodes=x_size//3, in_channels=n_actions,
                                            out_channels=20)
        self.digit_capsules2 = CapsuleLayer(num_capsules=num_classes, num_route_nodes=x_size//3, in_channels=n_actions,
                                            out_channels=20)
        self.reset_parameters()
    def reset_parameters(self):
        # Fan-in-scaled uniform init for the gating vectors.
        stdv = 1. / math.sqrt(self.v[0].size(0))
        for a in range(self.n_actions):
            self.v[a].data.uniform_(-stdv, stdv)
    def squash(self, tensor, dim=-1):
        """Capsule squashing non-linearity (same form as CapsuleLayer.squash)."""
        squared_norm = (tensor ** 2).sum(dim=dim, keepdim=True)
        scale = squared_norm / (1 + squared_norm)
        return scale * tensor / torch.sqrt(squared_norm)
    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len
        actions = batch * n_actions
        Returns (start, end) distributions over the original length
        (log-probabilities in training, probabilities at eval).
        """
        init_len = x.size(1)
        # Boundary summary: first half of the last state + second half of the
        # first state (assumes x is a BiRNN output — TODO confirm).
        x = torch.cat((x[:,-1,:x.size(2)//2], x[:,0,x.size(2)//2:]), 1)
        outputs = [(torch.mul(self.v[a], F.tanh(self.linear1[a](y)+self.linear2[a](x)))).unsqueeze(-1) for a in range(self.n_actions)]
        outputs = torch.cat(outputs, dim=-1)
        x = self.squash(outputs) # [batch, x_size, n_actions]
        start = self.digit_capsules1(x).squeeze(2).squeeze(2).transpose(0, 1)
        end = self.digit_capsules2(x).squeeze(2).squeeze(2).transpose(0, 1)
        for i, inp in enumerate([start, end]):
            # Capsule length = score for each position.
            classes = (inp ** 2).sum(dim=-1) ** 0.5
            # Pad value chosen at or below the smallest observed score, so a
            # padded position cannot win while the softmax stays finite.
            minel = torch.min(classes.data)
            if minel > 0:
                minel = minel*1e-2
            else:
                minel = minel*1e-2+minel
            if init_len < self.max_len:
                classes = classes[:,:init_len]
            else:
                # problem when pad from the beginning with infinity, NLL has nonzero probability
                classes = F.pad(classes, (init_len-self.max_len,0), "constant", minel)
            classes.data.masked_fill_(x_mask.data, -float('inf'))
            if self.training:
                res = F.log_softmax(classes, dim=-1)
            else:
                res = F.softmax(classes, dim=-1)
            if i ==0:
                alpha = res
            elif i == 1:
                beta = res
        return alpha[:, :init_len], beta[:, :init_len]
class CapsNetFin_init(nn.Module):
    """Capsule-network span head over the full sequence (conv front-end).

    Fuses the question into every position (x + W y), pads/crops the
    sequence to max_len, runs a small Conv1d stack, forms primary capsules,
    and routes them into per-position start/end capsules whose lengths give
    the span scores.  (The original docstring here was a copy-paste from the
    bilinear attention layers.)
    """
    def __init__(self, x_size, y_size):
        super(CapsNetFin_init, self).__init__()
        # len = 767 //400
        self.conv = nn.ModuleList()
        hid_cnn = 64
        self.max_len = num_classes = 400 #767
        self.cnn_layers = 4
        self.linear = nn.Linear(y_size, x_size)
        for i in range(self.cnn_layers):
            chan_in = hid_cnn
            chan_out = hid_cnn
            kern = 9
            pad = kern//2
            step = 2
            if i == 0:
                # First layer maps input features at full resolution; later
                # layers halve the length with stride 2.
                step=1
                chan_in = x_size
            self.conv.append(nn.Conv1d(in_channels=chan_in, out_channels=chan_out, kernel_size=kern, stride=step, padding=pad))
        # len = 192 //50
        self.primary_capsules = CapsuleLayer(num_capsules=8, num_route_nodes=-1, in_channels=hid_cnn, out_channels=16,
                                             kernel_size=9, stride=2)
        # len = 92 //21
        # NOTE: num_route_nodes=16*21 hard-codes the length after the conv
        # stack + primary capsules for max_len=400; changing max_len or the
        # strides requires recomputing this.
        self.digit_capsules1 = CapsuleLayer(num_capsules=num_classes, num_route_nodes=16 * 21, in_channels=8,
                                            out_channels=16)
        self.digit_capsules2 = CapsuleLayer(num_capsules=num_classes, num_route_nodes=16 * 21, in_channels=8,
                                            out_channels=16)
    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len
        actions = batch * n_actions (accepted but unused here)
        Returns (start, end) distributions over the original length.
        """
        Wy = self.linear(y)
        init_len = x.size(1)
        # Broadcast-add the projected question to every document position.
        xWy = x + Wy.unsqueeze(1)
        if init_len < self.max_len:
            xWy = F.pad(xWy, (0,0,0,self.max_len - init_len,0,0), "constant", 0)
        else:
            xWy = xWy[:,-self.max_len:,:]
        # xWy [batch, seq, hid]
        # [emb, hid, k] >> [1, emb, hid, k] >> [k, emb, hid, 1] >> [1, k, emb, hid]
        out = xWy.transpose(1,2).contiguous()
        for i in range(self.cnn_layers):
            out = F.relu(self.conv[i](out))
        x = out
        x = self.primary_capsules(x)
        start = self.digit_capsules1(x).squeeze(2).squeeze(2).transpose(0, 1)
        end = self.digit_capsules2(x).squeeze(2).squeeze(2).transpose(0, 1)
        for i, inp in enumerate([start, end]):
            # Capsule length = per-position score.
            classes = (inp ** 2).sum(dim=-1) ** 0.5
            if init_len < self.max_len:
                classes = classes[:,:init_len]
            else:
                classes = F.pad(classes, (init_len-self.max_len,0), "constant", -float('inf'))
            classes.data.masked_fill_(x_mask.data, -float('inf'))
            if self.training:
                res = F.log_softmax(classes, dim=-1)
            else:
                res = F.softmax(classes, dim=-1)
            if i ==0:
                alpha = res
            elif i == 1:
                beta = res
        return alpha[:, :init_len], beta[:, :init_len]
class BilinearSeqAttnAction1(nn.Module):
    """Bilinear attention over X w.r.t. y where the shared weight matrix is
    re-weighted per action by learned attention over its columns ('h') and
    optionally also its rows ('eh'):
    * o_i = softmax(x_i' W_a y) for x_i in X, with W_a derived from the
      shared `weight` and per-action vectors wa_h / wa_e.
    Output is log-softmax in training (for NLL), softmax at eval.
    """
    def __init__(self, x_size, y_size, n_actions, identity=False, wn=False, func="h"):
        super(BilinearSeqAttnAction1, self).__init__()
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        self.weight = nn.Parameter(torch.Tensor(y_size, x_size))
        self.bias = nn.Parameter(torch.Tensor(x_size))
        self.n_actions = n_actions
        self.n_func = func
        if func == 'h':
            self.wa_h = nn.Parameter(torch.Tensor(self.n_actions, y_size, 1))
            def attn(w1, wh):
                # w1 [emb x hid]
                # wh [emb x 1]
                a2 = torch.mul(w1, wh)
                score2 = F.softmax(a2.sum(0), dim=-1).unsqueeze(0) # [1 x hid]
                wr = torch.mul(w1, score2)
                return wr
            self.func = lambda a,b : attn(a,b)
        elif func == 'eh':
            self.wa_h = nn.Parameter(torch.Tensor(self.n_actions, y_size, 1))
            self.wa_e = nn.Parameter(torch.Tensor(self.n_actions, 1, x_size))
            def attn(w1, wh, we):
                # w1 [emb x 3*(2hid)]
                # wh [emb x 1]
                # we [1 x 3*(2hid)]
                a1 = torch.mul(w1, we)
                score1 = F.softmax(a1.sum(1), dim=-1).unsqueeze(1) # [emb x 1]
                a2 = torch.mul(w1, wh)
                score2 = F.softmax(a2.sum(0), dim=-1).unsqueeze(0) # [1 x 3*(2hid)]
                wr = torch.mul(w1, score1)
                wr = torch.mul(wr, score2)
                return wr
            self.func = lambda a,b,c : attn(a,b,c)
        self.reset_parameters()
    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(-1))
        self.weight.data.uniform_(-stdv, stdv)
        self.bias.data.zero_()
        if self.n_func == 'h':
            stdv = 1. / math.sqrt(self.wa_h.size(1))
            self.wa_h.data.uniform_(-stdv, stdv)
        else:
            # NOTE(review): wa_e is initialized with the stdv computed from
            # `weight` above (1/sqrt(x_size)), not from its own fan-in —
            # possibly an oversight; confirm before changing.
            self.wa_e.data.uniform_(-stdv, stdv)
            stdv = 1. / math.sqrt(self.wa_h.size(1))
            self.wa_h.data.uniform_(-stdv, stdv)
    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len
        actions = batch * n_actions
        """
        a_oh = one_hot(actions, self.n_actions).unsqueeze(2) # [batch x n_actions x 1]
        u = []
        for a in range(self.n_actions):
            # Derive the action-specific weight matrix, then project y by it.
            if self.n_func == 'h':
                w_i = self.func(self.weight, self.wa_h[a])
            elif self.n_func == 'eh':
                w_i = self.func(self.weight, self.wa_h[a], self.wa_e[a])
            u_i = y.mm(w_i)
            u.append(u_i)
        u = torch.stack(u, 1) # [batch x actions x hid]
        # One-hot selection of each sample's action projection.
        Wy = torch.mul(u, a_oh).sum(1) + self.bias
        xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
        xWy.data.masked_fill_(x_mask.data, -float('inf'))
        if self.training:
            # In training we output log-softmax for NLL
            alpha = F.log_softmax(xWy, dim=-1)
        else:
            # ...Otherwise 0-1 probabilities
            alpha = F.softmax(xWy, dim=-1)
        return alpha
class BilinearSeqAttnAction(nn.Module):
    """Bilinear attention over X w.r.t. y where the action-specific weight
    matrix is produced by passing the shared `weight` through a small
    per-action 2-D CNN (func='kconvN' -> N conv layers):
    * o_i = softmax(x_i' W_a y) for x_i in X.
    Output is log-softmax in training (for NLL), softmax at eval.
    """
    def __init__(self, x_size, y_size, n_actions, identity=False, wn=False, func='kconv5'):
        super(BilinearSeqAttnAction, self).__init__()
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        self.weight = nn.Parameter(torch.Tensor(y_size, x_size))
        self.bias = nn.Parameter(torch.Tensor(x_size))
        self.w_conv = nn.ModuleList()
        self.n_actions = n_actions
        # Depth of the conv stack is encoded in the func string, e.g. 'kconv5'.
        self.cnn_layers = int(func[5:].split('_')[0])
        hid_cnn = 64
        for i in range(self.cnn_layers):
            chan_in = hid_cnn
            chan_out = hid_cnn
            kern = 3
            pad = 1
            if i == 0:
                chan_in = 1
            elif i == self.cnn_layers-1:
                #kern = 1
                #pad = 0
                chan_out = 1
            # One conv per action at every depth.
            a_conv = nn.ModuleList()
            for a in range(self.n_actions):
                a_conv.append(nn.Conv2d(chan_in, chan_out, kern, stride=1, padding=pad))
            self.w_conv.append(a_conv)
        def conv_forw(a):
            # w1 [emb x 3*(2hid)]
            # Treat the weight matrix as a 1-channel image and run action a's
            # conv stack over it (ReLU between layers, linear final layer).
            out = self.weight.unsqueeze(0).unsqueeze(0)
            for i in range(self.cnn_layers):
                if i != self.cnn_layers-1:
                    out = F.relu(self.w_conv[i][a](out))
                else:
                    out = self.w_conv[i][a](out)
            out = out.squeeze()
            return out
        self.func = lambda a: conv_forw(a)
        self.reset_parameters()
    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(-1))
        self.weight.data.uniform_(-stdv, stdv)
        self.bias.data.zero_()
    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len
        actions = batch * n_actions
        """
        a_oh = one_hot(actions, self.n_actions).unsqueeze(2) # [batch x n_actions x 1]
        u = []
        for a in range(self.n_actions):
            # Action-specific weight from the conv stack; project y with it.
            w_i = self.func(a)
            u_i = y.mm(w_i)
            u.append(u_i)
        u = torch.stack(u, 1) # [batch x actions x hid]
        # NOTE(review): unlike the sibling variants, `self.bias` is created
        # and initialized but never added here — confirm whether intended.
        Wy = torch.mul(u, a_oh).sum(1)
        xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
        xWy.data.masked_fill_(x_mask.data, -float('inf'))
        if self.training:
            # In training we output log-softmax for NLL
            alpha = F.log_softmax(xWy, dim=-1)
        else:
            # ...Otherwise 0-1 probabilities
            alpha = F.softmax(xWy, dim=-1)
        return alpha
class BilinearSeqAttnAction3(nn.Module):
    """Bilinear attention over X w.r.t. y with per-action multiplicative
    (optionally sigmoid-gated) modulation of a shared weight and bias:
    * o_i = softmax(x_i' W_a y + b_a) for x_i in X.
    Output is log-softmax in training (for NLL), softmax at eval.
    """
    def __init__(self, x_size, y_size, n_actions, identity=False, wn=False, func='mul_s'):
        super(BilinearSeqAttnAction3, self).__init__()
        self.wn = (lambda x: weight_norm(x, dim=None)) if wn else (lambda x: x)
        # Shared base parameters plus one modulation tensor per action.
        self.weight = nn.Parameter(torch.Tensor(y_size, x_size))
        self.bias = nn.Parameter(torch.Tensor(x_size))
        self.wa = nn.Parameter(torch.Tensor(n_actions,y_size, x_size))
        self.ba = nn.Parameter(torch.Tensor(n_actions, x_size))
        self.n_actions = n_actions
        if func == 'mul':
            self.func = lambda a, b: torch.mul(a, b)
        elif func == 'mul_s':
            # Sigmoid-gated variant: the per-action tensor acts as a gate.
            self.func = lambda a, b: torch.mul(a, F.sigmoid(b))
        self.reset_parameters()
    def reset_parameters(self):
        # Fan-in-scaled uniform weights, zero biases. (uniform_ calls come
        # first so the RNG consumption order matches the original.)
        bound = 1. / math.sqrt(self.weight.size(-1))
        self.weight.data.uniform_(-bound, bound)
        self.wa.data.uniform_(-bound, bound)
        self.bias.data.zero_()
        self.ba.data.zero_()
    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len
        actions = batch * n_actions
        """
        a_oh = one_hot(actions, self.n_actions).unsqueeze(2) # [batch x n_actions x 1]
        # Project y with every action's modulated weight, then pick each
        # sample's projection via the one-hot action vector.
        per_action = [y.mm(self.func(self.weight, self.wa[a]))
                      for a in range(self.n_actions)]
        u = torch.stack(per_action, 1) # [batch x actions x hid]
        b = self.func(self.bias, torch.mm(a_oh.squeeze(2), self.ba))
        Wy = torch.mul(u, a_oh).sum(1) + b
        xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
        xWy.data.masked_fill_(x_mask.data, -float('inf'))
        normalize = F.log_softmax if self.training else F.softmax
        return normalize(xWy, dim=-1)
class BilinearSeqAttnAction2(nn.Module):
    """Bilinear attention over X w.r.t. y with a fully separate weight
    matrix and bias per action, selected via the one-hot action vector:
    * o_i = softmax(x_i' W_a y + b_a) for x_i in X.
    Output is log-softmax in training (for NLL), softmax at eval.
    """
    def __init__(self, x_size, y_size, n_actions, identity=False, wn=False):
        super(BilinearSeqAttnAction2, self).__init__()
        self.wn = (lambda x: weight_norm(x, dim=None)) if wn else (lambda x: x)
        self.weight = nn.Parameter(torch.Tensor(n_actions,y_size, x_size))
        self.bias = nn.Parameter(torch.Tensor(n_actions,x_size))
        self.n_actions = n_actions
        self.reset_parameters()
    def reset_parameters(self):
        bound = 1. / math.sqrt(self.weight.size(2))
        self.weight.data.uniform_(-bound, bound)
        self.bias.data.uniform_(-bound, bound)
    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len
        actions = batch * n_actions
        """
        a_onehot = one_hot(actions, self.n_actions)
        # Select each sample's weight matrix and bias by one-hot matmul.
        w = torch.mm(a_onehot, self.weight.view(self.n_actions, -1))
        w = w.view(x.size(0), self.weight.size(1), self.weight.size(2))
        b = torch.mm(a_onehot, self.bias)
        Wy = torch.bmm(y.unsqueeze(1), w).squeeze(1) + b
        xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
        xWy.data.masked_fill_(x_mask.data, -float('inf'))
        normalize = F.log_softmax if self.training else F.softmax
        return normalize(xWy, dim=-1)
class PointerNetworkAction(nn.Module):
    """Action-conditioned pointer network span decoder.

    Attends once from the initial state c0 for the start distribution,
    updates the state through an SRU cell fed with the attention pool, and
    attends again from the new state for the end distribution.
    """
    def __init__(self, x_size, y_size, n_actions, wn=False, opt=None):
        super(PointerNetworkAction, self).__init__()
        self.attention = SeqAttentionAction(
            x_size,
            y_size, opt['n_actions'], drop_r=opt['dropout_rnn'])
        self.n_actions = n_actions
        self.rnn_cell = MF.SRUCell(
            x_size, y_size,
            bidirectional=False, dropout=opt['dropout_rnn'],
            rnn_dropout=opt['dropout_rnn'], use_tanh=1)
    def forward(self, x, x_mask, c0, actions):
        """
        x = batch * len * h1
        x_mask = batch * len
        c0 = batch * h2 (initial decoder state)
        actions = batch action indices
        Returns (start, end) distributions (log-probs in training).
        """
        s_logits = self.attention(x, c0, x_mask, actions)
        pool = (x * F.softmax(s_logits, dim=-1).unsqueeze(2)).sum(1)
        state = self.rnn_cell(pool, c0=c0)[1]
        e_logits = self.attention(x, state, x_mask, actions)
        norm = F.log_softmax if self.training else F.softmax
        return norm(s_logits, dim=-1), norm(e_logits, dim=-1)
class PointerNetwork(nn.Module):
    """Pointer network span decoder.

    One attention pass from c0 gives the start distribution; the attention
    pool drives an SRU state update, and a second pass from the updated
    state gives the end distribution.
    """
    def __init__(self, x_size, y_size, wn=False, opt=None):
        super(PointerNetwork, self).__init__()
        self.attention = SeqAttention(
            x_size,
            y_size, wn=wn, drop_r=opt['dropout_rnn'])
        self.rnn_cell = MF.SRUCell(
            x_size, y_size,
            bidirectional=False, dropout=opt['dropout_rnn'],
            rnn_dropout=opt['dropout_rnn'], use_tanh=1)
    def forward(self, x, x_mask, c0, actions):
        """
        x = batch * len * h1
        x_mask = batch * len
        c0 = batch * h2 (initial decoder state)
        (`actions` is accepted for interface parity and unused here.)
        Returns (start, end) distributions (log-probs in training).
        """
        s_logits = self.attention(x, c0, x_mask, log=True)
        pool = (x * F.softmax(s_logits, dim=-1).unsqueeze(2)).sum(1)
        state = self.rnn_cell(pool, c0=c0)[1]
        e_logits = self.attention(x, state, x_mask)
        norm = F.log_softmax if self.training else F.softmax
        return norm(s_logits, dim=-1), norm(e_logits, dim=-1)
class SeqAttentionAction(nn.Module):
    """Additive (tanh) attention between a sequence and a vector with a
    separate parameter set per action:
    * o_i = v_a' tanh(W1_a x_i + W2_a y) for x_i in X.
    Returns raw scores with padded positions set to -inf; callers normalize.
    """
    def __init__(self, x_size, y_size, n_actions, wn=False, drop_r=0.0):
        super(SeqAttentionAction, self).__init__()
        self.n_actions = n_actions
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        # Per-action projections into an x_size//4 attention space.
        self.w1 = nn.Parameter(torch.Tensor(n_actions,x_size, x_size//4))
        self.b1 = nn.Parameter(torch.Tensor(n_actions,x_size//4))
        self.w2 = nn.Parameter(torch.Tensor(n_actions,y_size, x_size//4))
        self.b2 = nn.Parameter(torch.Tensor(n_actions,x_size//4))
        self.v = nn.Parameter(torch.Tensor(n_actions,x_size//4))
        self.reset_parameters()
        if drop_r>0:
            self.dropout = nn.Dropout(drop_r)
        self.drop_r = drop_r
    def reset_parameters(self):
        # Fan-in-scaled uniform weights, zero biases.
        stdv = 1. / math.sqrt(self.w1.size(2))
        self.w1.data.uniform_(-stdv, stdv)
        self.b1.data.zero_()
        self.w2.data.uniform_(-stdv, stdv)
        self.b2.data.zero_()
        self.v.data.uniform_(-stdv, stdv)
    def get_action_parameters(self, a_onehot, x_size):
        """Select each sample's action parameters by one-hot matmul, then
        replicate them across all x_size[1] positions and flatten to
        batch*len rows so forward() can use a single batched bmm."""
        w1 = torch.mm(a_onehot, self.w1.view(self.n_actions, -1)).view(x_size[0], self.w1.size(1), self.w1.size(2))
        w1 = w1.unsqueeze(1).expand(x_size[0], x_size[1], w1.size(1), w1.size(2))
        w1 = w1.contiguous().view(-1,w1.size(2), w1.size(3))
        w2 = torch.mm(a_onehot, self.w2.view(self.n_actions, -1)).view(x_size[0], self.w2.size(1), self.w2.size(2))
        w2 = w2.unsqueeze(1).expand(x_size[0], x_size[1], w2.size(1), w2.size(2))
        w2 = w2.contiguous().view(-1,w2.size(2), w2.size(3))
        b1 = torch.mm(a_onehot, self.b1).unsqueeze(1).expand(x_size[0], x_size[1], self.b1.size(1)).contiguous().view(-1, self.b1.size(1))
        b2 = torch.mm(a_onehot, self.b2).unsqueeze(1).expand(x_size[0], x_size[1], self.b2.size(1)).contiguous().view(-1, self.b2.size(1))
        v = torch.mm(a_onehot, self.v).unsqueeze(1).expand(x_size[0], x_size[1], self.v.size(1)).contiguous().view(-1, self.v.size(1))
        return w1, w2, b1, b2, v
    def forward(self, x, y, x_mask, actions):
        """
        x = batch * len * hdim
        y = batch * hdim
        x_mask = batch * len
        actions = batch action indices
        Returns batch * len raw scores (-inf at padded positions).
        """
        x_flat = x.view(-1, 1, x.size(-1))
        # Broadcast y to every position, flattened alongside x.
        y_flat = y.unsqueeze(1).expand(y.size(0), x.size(1), y.size(1)).contiguous().view(-1, 1, y.size(-1))
        a_onehot = one_hot(actions, self.n_actions)
        w1, w2, b1, b2, v = self.get_action_parameters(a_onehot, [x.size(0), x.size(1), x.size(2)])
        x_t = torch.bmm(x_flat, w1).squeeze(1) + b1
        y_t = torch.bmm(y_flat, w2).squeeze(1) + b2
        inpt = F.tanh(x_t+y_t)
        if self.drop_r>0:
            inpt = self.dropout(inpt)
        inpt = torch.bmm(inpt.unsqueeze(1), v.unsqueeze(2))
        scores = inpt.view(x.size(0), x.size(1))
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        # Free the large expanded per-position parameter views eagerly.
        del w1,w2,b1,b2,v,x_flat,y_flat,inpt
        return scores
class CriticLinear(nn.Module):
    """Small MLP critic head producing one scalar value per batch row.

    `num_layers`/`nl` select depth: an optional extra hidden layer (w2) and
    either a direct scalar head (nl=3) or hidden+scalar heads (nl=4).
    """
    def __init__(self, x_size, y_size, identity=False, num_layers=2, wn=False, nl=4):
        super(CriticLinear, self).__init__()
        self.wn = (lambda x: weight_norm(x, dim=None)) if wn else (lambda x: x)
        self.w1 = self.wn(nn.Linear(x_size, y_size))
        if num_layers == 3:
            self.w2 = self.wn(nn.Linear(y_size, y_size))
        if nl == 3:
            self.w3 = self.wn(nn.Linear(y_size, 1))
        elif nl == 4:
            self.w3 = self.wn(nn.Linear(y_size, y_size))
            self.w4 = self.wn(nn.Linear(y_size, 1))
        self.nl = nl
        self.num_layers = num_layers
    def forward(self, x):
        """x: batch * x_size -> batch scalar values."""
        hidden = self.w1(x)
        if self.num_layers == 3:
            hidden = self.w2(F.relu(hidden))
        if self.nl == 4:
            value = self.w4(F.relu(self.w3(F.relu(hidden))))
        else:
            value = self.w3(F.relu(hidden))
        return value.squeeze(1)
class PolicyLatent(nn.Module):
    """MLP policy head over a latent vector; returns (logits, probs).

    `add` splits the input into 2 or 3 chunks with separate first-layer
    projections that are summed; `num_layers` adds an extra hidden layer;
    `nl` selects the depth of the output head (3, 4 or 5 linear layers in
    total). Probs are softmax for n_actions > 1, sigmoid for n_actions == 1.
    """
    def __init__(self, x_size, y_size, n_actions, num_layers=2, identity=False, wn=False, add=1, nl=5):
        super(PolicyLatent, self).__init__()
        self.wn = (lambda x: weight_norm(x, dim=None)) if wn else (lambda x: x)
        self.add = add
        self.n_actions = n_actions
        if add == 3:
            self.w1a = self.wn(nn.Linear(x_size//3, y_size))
            self.w1b = self.wn(nn.Linear(x_size//3, y_size))
            self.w1c = self.wn(nn.Linear(x_size//3, y_size))
        elif add == 2:
            self.w1a = self.wn(nn.Linear(x_size//2, y_size))
            self.w1b = self.wn(nn.Linear(x_size//2, y_size))
        else:
            self.w1 = self.wn(nn.Linear(x_size, y_size))
        self.num_layers = num_layers
        if num_layers == 3:
            self.w2 = self.wn(nn.Linear(y_size, y_size))
        self.nl = nl
        if nl == 3:
            self.w3 = self.wn(nn.Linear(y_size, n_actions))
        else:
            self.w3 = self.wn(nn.Linear(y_size, y_size))
            if nl == 4:
                self.w4 = self.wn(nn.Linear(y_size, n_actions))
            elif nl == 5:
                self.w4 = self.wn(nn.Linear(y_size, y_size))
                self.w5 = self.wn(nn.Linear(y_size, n_actions))
    def _embed(self, x):
        # First-layer projection, optionally split-and-summed over chunks.
        if self.add == 3:
            third = int(x.size(-1)//3)
            return (self.w1a(x[:, :third]) + self.w1b(x[:, third:2*third])
                    + self.w1c(x[:, 2*third:]))
        if self.add == 2:
            half = int(x.size(-1)/2)
            return self.w1a(x[:, :half]) + self.w1b(x[:, half:])
        return self.w1(x)
    def forward(self, x):
        """x: batch * x_size -> (logits, probs), each batch * n_actions."""
        x = self._embed(x)
        if self.num_layers == 3:
            x = self.w2(F.relu(x))
        if self.nl == 3:
            logits = self.w3(F.relu(x))
        else:
            x = self.w3(F.relu(x))
            if self.nl == 4:
                logits = self.w4(F.relu(x))
            else:
                logits = self.w5(F.relu(self.w4(F.relu(x))))
        if self.n_actions > 1:
            probs = F.softmax(logits, dim=-1)
        elif self.n_actions == 1:
            probs = F.sigmoid(logits)
        return logits, probs
class ControlVector(nn.Module):
    """Builds an action-conditioned control vector from a hidden state.

    `gate` selects the mechanism; its trailing "_<d>" encodes a width
    multiplier d:
      * 'fc_cat_<d>': tanh of [action embedding ; sigmoid-gated linear map
        of x], each half d*x_size/2 wide.
      * 'fc_add_<d>': additive variant at full d*x_size width.
      * 'tanh_<d>':  per-action affine tanh transform of x multiplied by a
        per-action sigmoid gate.
    Output is dropped out when drop_r > 0.
    """
    def __init__(self, x_size, gate, n_actions, identity=False, wn=False, drop_r=0.0):
        super(ControlVector, self).__init__()
        d_factor = int(gate.split('_')[-1])
        self.gate = gate; self.n_actions = n_actions
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        if 'fc_cat' in self.gate:
            self.g = self.wn(nn.Linear(n_actions, x_size*d_factor//2))
            self.a1 = self.wn(nn.Linear(x_size, x_size*d_factor//2))
            self.a2 = self.wn(nn.Linear(n_actions, x_size*d_factor//2))
        elif 'fc_add' in self.gate:
            self.g = self.wn(nn.Linear(n_actions, x_size*d_factor))
            self.a1 = self.wn(nn.Linear(x_size, x_size*d_factor))
            self.a2 = self.wn(nn.Linear(n_actions, x_size*d_factor))
        elif 'tanh' in self.gate:
            self.w1 = nn.Parameter(torch.Tensor(n_actions,x_size, x_size*d_factor))
            self.b1 = nn.Parameter(torch.Tensor(n_actions,x_size*d_factor))
            self.w2 = nn.Parameter(torch.Tensor(n_actions,x_size, x_size*d_factor))
            self.b2 = nn.Parameter(torch.Tensor(n_actions,x_size*d_factor))
        self.reset_parameters()
        if drop_r>0:
            self.dropout = nn.Dropout(drop_r)
        self.drop_r = drop_r
    def reset_parameters(self):
        # BUGFIX: w1/b1/w2/b2 exist only for the 'tanh' gate; the original
        # touched them unconditionally, so constructing an 'fc_cat'/'fc_add'
        # variant raised AttributeError here. The fc paths use nn.Linear,
        # which initializes itself.
        if 'tanh' in self.gate:
            stdv = 1. / math.sqrt(self.w1.size(2))
            self.w1.data.uniform_(-stdv, stdv)
            self.b1.data.zero_()
            self.w2.data.uniform_(-stdv, stdv)
            self.b2.data.zero_()
    def forward(self, x, actions): # x = batch * nhid
        """x: batch * x_size, actions: batch action indices ->
        batch * (gated control vector), contiguous."""
        a_onehot = one_hot(actions, self.n_actions)
        if 'fc_cat' in self.gate:
            gate = F.sigmoid(self.g(a_onehot))
            a1 = self.a1(x)
            a2 = self.a2(a_onehot)
            res = F.tanh(torch.cat((a2, a1*gate), 1))
        elif 'fc_add' in self.gate:
            gate = F.sigmoid(self.g(a_onehot))
            a1 = self.a1(x)
            a2 = self.a2(a_onehot)
            res = F.tanh(a2 + a1*gate)
        elif 'tanh' in self.gate:
            # Select per-action affine transform and gate by one-hot matmul.
            w1 = torch.mm(a_onehot, self.w1.view(self.n_actions, -1)).view(x.size(0), self.w1.size(1), self.w1.size(2))
            b1 = torch.mm(a_onehot, self.b1)
            w2 = torch.mm(a_onehot, self.w2.view(self.n_actions, -1)).view(x.size(0), self.w2.size(1), self.w2.size(2))
            b2 = torch.mm(a_onehot, self.b2)
            x_hat = F.tanh(torch.bmm(x.unsqueeze(1), w1).squeeze(1) + b1)
            gate = F.sigmoid(torch.bmm(x.unsqueeze(1), w2).squeeze(1) + b2)
            res = x_hat * gate
        if self.drop_r>0.0:
            return self.dropout(res.contiguous())
        else:
            return res.contiguous()
class MixingFeatures(nn.Module):
    """ mixing features of sequence and a single vector
    or
    mixing features of two sequences

    Computes a co-attention matrix A between document x and the projected
    question y. From A it derives max-attention pooled summaries m_d / m_q
    and, in `final` mode, a per-document-position mixture s_d of question
    features. Unless `latent` or `final`, also emits an answer-existence
    probability from the pooled summaries.
    """
    def __init__(self, x_size, y_size, wn=False, latent=False, final=False):
        super(MixingFeatures, self).__init__()
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        self.w1 = self.wn(nn.Linear(y_size, x_size))
        self.latent = latent; self.final = final
        if not latent and not final:
            # Heads for the answer-existence probability.
            self.w2 = self.wn(nn.Linear(x_size, x_size//2))
            self.w2b = self.wn(nn.Linear(x_size, x_size//2))
            self.w3 = self.wn(nn.Linear(x_size//2, 1))
    def forward(self, x, x_mask, y, y_mask):
        """
        x = batch * dlen * hdim
        x_mask = batch * dlen
        y = batch * qlen * hdim
        y_mask = batch * qlen
        Returns (s_d, m_d, 0) in final mode, else (m_d, m_q, ae_prob).
        """
        # Project the question into document feature space.
        y_n = F.tanh(self.w1(y.view(-1, y.size(-1)))).view(y.size(0), y.size(1), x.size(2))
        y_p = y_n.permute(0, 2, 1)
        A = torch.bmm(x, y_p) # batch * dlen * qlen
        # Mask both padded document rows and padded question columns.
        A.data.masked_fill_(x_mask.data.unsqueeze(2), -float('inf'))
        A.data.masked_fill_(y_mask.data.unsqueeze(1), -float('inf'))
        # which context words are most relevant to one of query words
        m_alpha_d = F.softmax(torch.max(A, 2)[0], dim=-1)
        m_d = torch.mul(x, m_alpha_d.unsqueeze(2)).sum(1)
        if self.final:
            #s_q = torch.bmm(x.permute(0,2,1), F.softmax(A, dim=1)).permute(0,2,1) # b * qlen * hdim
            p_d = F.softmax(A, dim=2)
            # Fully-masked rows are all -inf and softmax to NaN; `p != p`
            # detects the NaNs so they can be zeroed out.
            mask_d = (p_d != p_d).byte()
            p_d.data.masked_fill_(mask_d.data, 0.0)
            s_d = torch.bmm(p_d, y_n) # b * dlen * hdim
            return s_d, m_d, 0
        # which question words are most relevant to one of context words
        m_alpha_q = F.softmax(torch.max(A, 1)[0], dim=-1)
        m_q = torch.mul(y_n, m_alpha_q.unsqueeze(2)).sum(1)
        ae_prob = None
        if not self.latent:
            ae_prob = F.sigmoid(self.w3(F.relu(self.w2b(m_d) + self.w2(m_q))))
        return m_d, m_q, ae_prob
class LinearSeqAttn(nn.Module):
    """Self attention over a sequence:
    * o_i = softmax(Wx_i) for x_i in X.
    """
    def __init__(self, input_size, wn=False):
        super(LinearSeqAttn, self).__init__()
        self.wn = (lambda x: weight_norm(x, dim=None)) if wn else (lambda x: x)
        self.linear = self.wn(nn.Linear(input_size, 1))
    def forward(self, x, x_mask):
        """
        x = batch * len * hdim
        x_mask = batch * len
        Returns batch * len normalized attention weights.
        """
        flat = x.view(-1, x.size(-1))
        scores = self.linear(flat).view(x.size(0), x.size(1))
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        return F.softmax(scores, dim=-1)
class LinearSeqAttnAction(nn.Module):
    """Self attention over a sequence with a separate linear scorer per
    action:
    * o_i = softmax(W_a x_i + b_a) for x_i in X, parameters picked by action.
    """
    def __init__(self, input_size,n_actions, wn=False, drop_r=0.0):
        super(LinearSeqAttnAction, self).__init__()
        self.n_actions = n_actions
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        # One (input_size -> 1) scorer per action.
        self.weight = nn.Parameter(torch.Tensor(n_actions,input_size, 1))
        self.bias = nn.Parameter(torch.Tensor(n_actions,1))
        self.n_actions = n_actions
        self.reset_parameters()
    def reset_parameters(self):
        # Fan-in-scaled uniform weights, zero bias.
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        self.bias.data.zero_()
    def forward(self, x, x_mask, actions):
        """
        x = batch * len * hdim
        x_mask = batch * len
        actions = batch action indices
        Returns batch * len normalized attention weights.
        """
        x_flat = x.view(-1, x.size(-1))
        a_onehot = one_hot(actions, self.n_actions)
        # Select each sample's scorer by one-hot matmul, then replicate it
        # over all sequence positions, flattened to batch*len rows for bmm.
        w = torch.mm(a_onehot, self.weight.view(self.n_actions, -1)).view(x.size(0), self.weight.size(1), self.weight.size(2))
        b = torch.mm(a_onehot, self.bias).unsqueeze(1).expand(x.size(0), x.size(1), self.bias.size(1)).contiguous().view(x_flat.size(0), self.bias.size(1))
        w = w.unsqueeze(1).expand(x.size(0), x.size(1), w.size(1), w.size(2)).contiguous().view(x_flat.size(0),w.size(1), w.size(2))
        scores = (torch.bmm(x_flat.unsqueeze(1), w).squeeze(1) + b).view(x.size(0), x.size(1))
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        alpha = F.softmax(scores, dim=-1)
        return alpha
class LinearSeqAttnAction_ad1(nn.Module):
    """Self attention over a sequence:

    * o_i = softmax(Wx_i) for x_i in X.

    Action-conditioned variant: scores are <W x_i, sigmoid(v[a])> where
    v[a] is a learned gate vector selected by the per-sample action id and
    W is shared across actions.
    """

    def __init__(self, input_size, n_actions, wn=False, drop_r=0.0):
        super(LinearSeqAttnAction_ad1, self).__init__()
        self.n_actions = n_actions
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        # One gate vector per action; one shared projection.
        self.v = nn.Parameter(torch.Tensor(n_actions, input_size))
        self.w = self.wn(nn.Linear(input_size, input_size))
        self.n_actions = n_actions
        self.reset_parameters()

    def reset_parameters(self):
        # Fan-in scaled uniform init for the gate vectors.
        stdv = 1. / math.sqrt(self.v.size(1))
        self.v.data.uniform_(-stdv, stdv)

    def forward(self, x, x_mask, actions):
        """
        x = batch * len * hdim
        x_mask = batch * len (nonzero marks padding positions)
        actions = batch of long ids selecting the per-sample gate vector
        """
        x_flat = x.view(-1, x.size(-1))
        # Select and broadcast each sample's gate vector over its positions.
        a_onehot = one_hot(actions, self.n_actions)
        v = torch.mm(a_onehot, self.v).unsqueeze(1).expand(x.size(0), x.size(1), self.v.size(1)).contiguous().view(x_flat.size(0), self.v.size(1))
        wx = self.w(x_flat)
        # Gated dot-product score per position.
        scores = torch.mul(wx, F.sigmoid(v)).sum(1).view(x.size(0), x.size(1))
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        alpha = F.softmax(scores, dim=-1)
        return alpha
class LinearSeqAttnAction2(nn.Module):
    """Self attention over a sequence:

    * o_i = W_2*relu(W1 x_i) for x_i in X.

    Two-layer (MLP) scorer with a separate set of weights per action,
    selected by the per-sample action id.
    """

    def __init__(self, input_size, n_actions, wn=False, drop_r=0.0):
        super(LinearSeqAttnAction2, self).__init__()
        self.n_actions = n_actions
        # Layer 1: input_size -> input_size//2 (one set per action).
        self.w1 = nn.Parameter(torch.Tensor(n_actions, input_size, input_size // 2))
        self.b1 = nn.Parameter(torch.Tensor(n_actions, input_size // 2))
        # Layer 2: input_size//2 -> scalar score (one set per action).
        self.w2 = nn.Parameter(torch.Tensor(n_actions, input_size // 2, 1))
        self.b2 = nn.Parameter(torch.Tensor(n_actions, 1))
        self.n_actions = n_actions
        self.reset_parameters()
        if drop_r > 0:
            self.dropout = nn.Dropout(drop_r)
        self.drop_r = drop_r

    def reset_parameters(self):
        # One fan-in based scale shared by all four parameter tensors.
        stdv = 1. / math.sqrt(self.w1.size(2))
        self.w1.data.uniform_(-stdv, stdv)
        self.b1.data.uniform_(-stdv, stdv)
        self.w2.data.uniform_(-stdv, stdv)
        self.b2.data.uniform_(-stdv, stdv)

    def forward(self, x, x_mask, actions):
        """
        x = batch * len * hdim
        x_mask = batch * len (nonzero marks padding positions)
        actions = batch of long ids selecting the per-sample MLP
        """
        x_flat = x.view(-1, x.size(-1))
        # Select each sample's weights via one-hot matmul, then broadcast
        # them over every sequence position.
        a_onehot = one_hot(actions, self.n_actions)
        w1 = torch.mm(a_onehot, self.w1.view(self.n_actions, -1)).view(x.size(0), self.w1.size(1), self.w1.size(2))
        b1 = torch.mm(a_onehot, self.b1).unsqueeze(1).expand(x.size(0), x.size(1), self.b1.size(1)).contiguous().view(x_flat.size(0), self.b1.size(1))
        w1 = w1.unsqueeze(1).expand(x.size(0), x.size(1), w1.size(1), w1.size(2)).contiguous().view(x_flat.size(0), w1.size(1), w1.size(2))
        w2 = torch.mm(a_onehot, self.w2.view(self.n_actions, -1)).view(x.size(0), self.w2.size(1), self.w2.size(2))
        b2 = torch.mm(a_onehot, self.b2).unsqueeze(1).expand(x.size(0), x.size(1), self.b2.size(1)).contiguous().view(x_flat.size(0), self.b2.size(1))
        w2 = w2.unsqueeze(1).expand(x.size(0), x.size(1), w2.size(1), w2.size(2)).contiguous().view(x_flat.size(0), w2.size(1), w2.size(2))
        # Hidden layer, optional dropout, then scalar score per position.
        scores = F.relu(torch.bmm(x_flat.unsqueeze(1), w1) + b1.unsqueeze(1))
        if self.drop_r > 0:
            scores = self.dropout(scores)
        scores = (torch.bmm(scores, w2).squeeze(1) + b2).view(x.size(0), x.size(1))
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        alpha = F.softmax(scores, dim=-1)
        return alpha
class SeqAttention(nn.Module):
    """attention between a sequence and a tensor:

    * o_i = softmax(v*tanh(W1x_i+W2y)) for x_i in X.

    Additive (Bahdanau-style) attention of a query vector y over the
    positions of a sequence x.
    """

    def __init__(self, x_size, y_size, wn=False, drop_r=0.0):
        super(SeqAttention, self).__init__()
        if wn:
            self.wn = lambda x: weight_norm(x, dim=None)
        else:
            self.wn = lambda x: x
        self.w1 = self.wn(nn.Linear(x_size, x_size))
        self.w2 = self.wn(nn.Linear(y_size, x_size))
        self.v = self.wn(nn.Linear(x_size, 1))
        if drop_r > 0:
            self.dropout = nn.Dropout(drop_r)
        self.drop_r = drop_r

    def forward(self, x, y, x_mask, actions=None, log=False):
        """
        x = batch * len * hdim
        y = batch * hdim
        x_mask = batch * len (nonzero marks padding positions)
        actions is unused (kept for interface compatibility with the
        action-conditioned modules); log=True returns log-softmax weights.
        """
        x_flat = x.view(-1, x.size(-1))
        # Tile y so every sequence position is compared against it.
        y_flat = y.unsqueeze(1).expand(y.size(0), x.size(1), y.size(1)).contiguous().view(-1, y.size(-1))
        x_t = self.w1(x_flat)
        y_t = self.w2(y_flat)
        inpt = F.tanh(x_t + y_t)
        if self.drop_r > 0:
            inpt = self.dropout(inpt)
        scores = self.v(inpt).view(x.size(0), x.size(1))
        # Padding positions get -inf so softmax gives them zero weight.
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        if not log:
            return F.softmax(scores, dim=-1)
        else:
            return F.log_softmax(scores, dim=-1)
class GramMatrix(nn.Module):
    """Normalized Gram product of a 2-D feature matrix."""

    def forward(self, features):
        # features: [rows x cols]
        n_rows, n_cols = features.size()
        # Pairwise inner products between rows, normalized by the total
        # number of elements in the feature matrix.
        gram = features.mm(features.t())
        return gram / (n_rows * n_cols)
class GramMatrix_u(nn.Module):
    """Per-sample squared norm of a batch of feature vectors, scaled by width."""

    def forward(self, features):
        # features: [batch x hid]
        batch, hid = features.size()
        # Squared L2 norm of each row, normalized by the feature width.
        squared_norms = (features * features).sum(1)
        return squared_norms / hid
class ContentLoss(nn.Module):
    """Elementwise MSE between a weighted input and a fixed target.

    The target is scaled by ``weight`` once at construction and treated as a
    constant; the input is scaled by the same weight on every call.
    """

    def __init__(self, target, weight):
        super(ContentLoss, self).__init__()
        # We 'detach' the target content from the tree used to dynamically
        # compute the gradient: it is a stated value, not a variable.
        self.target = target * weight
        self.weight = weight
        # reduction='none' keeps the per-element losses. This is the
        # documented replacement for the deprecated reduce=False spelling.
        self.criterion = nn.MSELoss(reduction='none')

    def forward(self, input):
        """Return per-element squared error between input*weight and target."""
        loss = self.criterion(input * self.weight, self.target)
        return loss
class StyleLoss(nn.Module):
    """Elementwise MSE between the Gram statistics of the input and a target.

    Parameters
    -----
    target : precomputed Gram statistics; scaled by ``weight`` here.
        u True: target is per-sample statistics [batch x hid]
        u False: target is a full Gram matrix [hid1 x hid2]
    weight : scalar loss weight, also applied to the input's Gram output.
    """

    def __init__(self, target, weight, u=False):
        super(StyleLoss, self).__init__()
        self.target = target * weight
        self.weight = weight
        # Pick the Gram variant matching the target's layout.
        if u:
            self.gram = GramMatrix_u()
        else:
            self.gram = GramMatrix()
        # reduction='none' keeps per-element losses (documented replacement
        # for the deprecated reduce=False spelling).
        self.criterion = nn.MSELoss(reduction='none')

    def forward(self, input):
        # NOTE: the weighted Gram output is kept on self.G for inspection.
        self.G = self.gram(input)
        self.G.mul_(self.weight)
        loss = self.criterion(self.G, self.target)
        return loss
# ------------------------------------------------------------------------------
# Functional
# ------------------------------------------------------------------------------
def softmax(input, dim=1):
    """Softmax of *input* along dimension *dim*.

    F.softmax already normalizes along an arbitrary dimension, so the old
    transpose -> flatten -> softmax -> restore sequence is equivalent to a
    single call; the result is identical.
    """
    return F.softmax(input, dim=dim)
def uniform_weights(x, x_mask):
    """Return uniform weights over non-masked input."""
    batch, seq_len = x.size(0), x.size(1)
    weights = Variable(torch.ones(batch, seq_len))
    if x.data.is_cuda:
        weights = weights.cuda()
    # Zero out padding positions, then renormalize each row to sum to 1.
    weights = weights * x_mask.eq(0).float()
    row_totals = weights.sum(1, keepdim=True).expand(weights.size())
    return weights / row_totals
def weighted_avg(x, weights):
    """x = batch * len * d
    weights = batch * len

    Returns the weighted average over the len dimension: batch * d.
    """
    # (batch, 1, len) @ (batch, len, d) -> (batch, 1, d) -> (batch, d)
    return torch.bmm(weights.unsqueeze(1), x).squeeze(1)
def make_action(probs):
    """Sample one action per row from a categorical distribution.

    Returns the sampled action ids and their log-probabilities (the
    log-probs are what policy-gradient updates need).
    """
    # sample from multinomial discrete distribution
    m = Categorical(probs.contiguous())
    actions = m.sample()
    logp = m.log_prob(actions)
    return actions, logp
def one_hot(actions, n_actions):
    """One-hot encode action ids into a (batch x n_actions) float Variable.

    NOTE(review): allocates directly on CUDA, so this helper requires a GPU.
    """
    #assert len(actions.size()) == 1
    a_onehot = torch.FloatTensor(actions.size(0), n_actions).cuda()
    a_onehot.zero_()
    # `actions` may be a Variable (has .data) or a plain tensor; the bare
    # except falls back to the tensor path.
    try:
        a_onehot.scatter_(1, actions.data.unsqueeze(1), 1)
    except:
        a_onehot.scatter_(1, actions.unsqueeze(1), 1)
    return Variable(a_onehot)
def cat_entropy(logits, eps=1e-8):
    """Entropy of the categorical distribution defined by each row of logits.

    Uses the max-shift trick for numerical stability:
    H = sum_i p_i * (log Z - a_i) with a_i = logit_i - max_j logit_j.
    """
    shifted = logits - torch.max(logits, dim=1)[0].unsqueeze(1)
    exp_shifted = shifted.exp()
    partition = exp_shifted.sum(1).unsqueeze(1)
    probs = exp_shifted / partition
    return (probs * ((partition + eps).log() - shifted)).sum(1)
def make_samples_concrete(logits, s, log_temp, eps=1e-8):
    """Draw hard categorical samples plus two tempered (concrete) relaxations.

    s is the shape of the uniform noise to draw; log_temp is the log of the
    relaxation temperature. Requires CUDA (noise is allocated on the GPU).

    NOTE(review): the coupled second draw (z-tilde) looks like a
    REBAR/RELAX-style construction -- confirm against the intended estimator.
    """
    u1 = Variable(torch.from_numpy(np.random.random(s)).float().cuda())
    u2 = Variable(torch.from_numpy(np.random.random(s)).float().cuda())
    temp = log_temp.exp()
    logprobs = F.log_softmax(logits, dim=-1)
    # gumbel random variable
    g = -(-(u1 + eps).log() + eps).log()
    # gumbel trick to sample max from categorical distribution
    scores = logprobs + g
    _, hard_samples = scores.max(1)
    hard_samples_oh = one_hot(hard_samples, scores.size(1))
    logprobs_z = scores
    # Second Gumbel draw, conditionally coupled to the hard sample.
    g2 = -(-(u2 + eps).log() + eps).log()
    scores2 = logprobs + g2
    B = (scores2 * hard_samples_oh).sum(1).unsqueeze(1) - logprobs
    y = -1. * (u2).log() + (-1. * B).exp()
    g3 = -1. * (y).log()
    scores3 = g3 + logprobs
    # slightly biased…
    logprobs_zt = hard_samples_oh * scores2 + ((-1. * hard_samples_oh) + 1.) * scores3
    return hard_samples, F.softmax(logprobs_z / temp, dim=-1), F.softmax(logprobs_zt / temp, dim=-1)
def score_sc(pred_s, pred_m, truth):
    """F1-score two prediction lists against reference answers.

    Returns (f1_s, f1_m) as numpy arrays; f1_s is empty when pred_s is
    falsy (then only pred_m is scored).
    """
    f1_s, f1_m = [], []
    if pred_s:
        assert len(pred_s) == len(truth)
        for ps, pm, t in zip(pred_s, pred_m, truth):
            f1_s += [_f1_score(ps, t)]
            f1_m += [_f1_score(pm, t)]
    else:
        for pm, t in zip(pred_m, truth):
            f1_m += [_f1_score(pm, t)]
    return np.array(f1_s), np.array(f1_m)
def _f1_score(pred, answers):
    """Best token-level F1 of *pred* against any of the reference answers."""

    def _score(g_tokens, a_tokens):
        # Token overlap counted with multiplicity.
        common = Counter(g_tokens) & Counter(a_tokens)
        num_same = sum(common.values())
        if num_same == 0:
            return 0
        precision = 1. * num_same / len(g_tokens)
        recall = 1. * num_same / len(a_tokens)
        return (2 * precision * recall) / (precision + recall)

    if pred is None or answers is None:
        return 0
    g_tokens = _normalize_answer(pred).split()
    return max(_score(g_tokens, _normalize_answer(a).split()) for a in answers)
def _normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s)))) | 1.476563 | 1 |
asusrouter/dataclass.py | Vaskivskyi/asusrouter | 0 | 82180 | <gh_stars>0
"""Dataclass module for AsusRouter"""
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from asusrouter.util.converters import none_or_str
@dataclass
class ConnectedDevice:
    """State of a single client device connected to the router."""

    name : str | None = None             # hostname / friendly name
    mac : str | None = None              # MAC address
    ip : str | None = None               # assigned IP address
    ip_method : str | None = None        # how the IP was obtained (e.g. DHCP/static) -- TODO confirm values
    internet_state : bool | None = None  # whether internet access is currently up
    internet_mode : bool | None = None   # whether internet access is enabled
    connection_type : str | None = None  # wired / wireless connection identifier
    # WLAN only values
    online : bool | None = None          # currently associated with the AP
    rssi : int | None = None             # signal strength (presumably dBm -- TODO confirm)
    connected_since : int | None = None  # connection duration/start (units unclear -- TODO confirm)
    rx_speed : float | None = None       # receive rate
    tx_speed : float | None = None       # transmit rate
@dataclass
class AsusDevice:
    """Static identity and firmware information for an ASUS device."""

    serial : str | None = None
    mac : str | None = None
    model : str | None = None
    brand : str = "ASUSTek"
    fw_major : str | None = None
    fw_minor : str | None = None
    fw_build : str | None = None
    services : str | None = None
    led : bool = False

    def firmware(self) -> str:
        """Return the firmware version formatted as '<major>.<minor>_<build>'."""
        return f"{self.fw_major}.{self.fw_minor}_{self.fw_build}"
@dataclass
class Key:
    """A lookup key with an optional replacement value and a converter.

    NOTE(review): the `function` annotation is not a defined name; it only
    works because `from __future__ import annotations` keeps annotations
    lazy (they are never evaluated).
    """

    value : str
    value_to_use : str = ""
    # Converter callers apply to raw values fetched for this key.
    method : function = none_or_str

    def __str__(self) -> str:
        """Return only `value` as default"""
        return self.value

    def get(self) -> str:
        """
        Get the proper value

        Returns
        -----
        `value_to_use` if exists, `value` otherwise
        """
        if self.value_to_use != "":
            return self.value_to_use
        return self.value
@dataclass
class Monitor(dict):
    """Dict with monitoring bookkeeping attached.

    Properties
    -----
    `active`: whether the monitor is currently running
    `time`: when the monitor was last updated
    `ready`: whether the monitor has ever completed a load

    Methods
    -----
    `start` / `stop`: toggle `active`
    `reset`: stamp `time` with the current UTC time
    `finish`: mark the monitor as `ready`
    """

    active : bool = False
    time : datetime | None = None
    ready : bool = False

    def start(self) -> None:
        """Mark the monitor as running."""
        self.active = True

    def stop(self) -> None:
        """Mark the monitor as stopped."""
        self.active = False

    def reset(self) -> None:
        """Stamp the monitor with the current UTC time."""
        self.time = datetime.utcnow()

    def finish(self) -> None:
        """Mark the monitor as having completed an initial load."""
        self.ready = True
| 2.21875 | 2 |
beam_telescope_analysis/examples/scattering_planes.py | SiLab-Bonn/beam-telescope-analysis | 3 | 82308 | ''' Minimal example script in order to show how the scattering plane feature for
track fitting (only available when using the Kalman Filter) has to used.
'''
from beam_telescope_analysis.telescope.dut import ScatteringPlane
def run_analysis():
    """Build the example scattering planes used during track fitting."""
    # Scattering planes are added on the fly during track fitting and are
    # sorted automatically by their z-position, so only name, material
    # budget and z need to vary here; all alignment parameters stay zero.
    plane_specs = [
        ('ScatteringPlane1', 0.01, 1000.0),
        ('ScatteringPlane2', 0.02, 2000.0),
    ]
    scattering_planes = [
        ScatteringPlane(
            name=name,
            material_budget=material_budget,
            translation_x=0,
            translation_y=0,
            translation_z=translation_z,
            rotation_alpha=0,
            rotation_beta=0,
            rotation_gamma=0,
        )
        for name, material_budget, translation_z in plane_specs
    ]
    # In the track fitting step, `scattering_planes` needs just to be passed
    # as a parameter to the track fitting function.
# In the track fitting step, `scattering_planes` needs just to be passed as a parameter to the track fitting fuction.
# Guarded entry point so multiprocessing on Windows can re-import the module
# without re-running the analysis.
if __name__ == '__main__':  # Main entry point is needed for multiprocessing under windows
    run_analysis()
| 1.796875 | 2 |
boa_test/example/MethodTest4.py | EdgeDLT/neo-boa | 79 | 82436 | <gh_stars>10-100
# tested
# Entry point: exercises the add() helper twice; evaluates to 63.
def Main():
    first = 1
    second = 10
    third = 20
    partial = add(first, second, 10)
    total = add(partial, partial, partial)
    return total


# Sum of the three arguments.
def add(a, b, c):
    result = a + b + c
    return result
| 2.4375 | 2 |
HW2/gradebook_202101-14765_HW20220Assignment_2021-03-11-18-59-42/Libraries/thermodynamics.py | CarlGriffinsteed/UVM-ME144-Heat-Transfer | 7 | 82564 | <filename>HW2/gradebook_202101-14765_HW20220Assignment_2021-03-11-18-59-42/Libraries/thermodynamics.py
""" Object name: Fluid"""
import numpy as np
import scipy
import scipy.optimize
from scipy.constants import convert_temperature
def C2K(T):
    """Celsius -> Kelvin."""
    return convert_temperature(T, 'Celsius', 'Kelvin')


def C2F(T):
    """Celsius -> Fahrenheit."""
    return convert_temperature(T, 'Celsius', 'Fahrenheit')


def F2K(T):
    """Fahrenheit -> Kelvin."""
    return convert_temperature(T, 'Fahrenheit', 'Kelvin')


def F2C(T):
    """Fahrenheit -> Celsius."""
    return convert_temperature(T, 'Fahrenheit', 'Celsius')


def K2F(T):
    """Kelvin -> Fahrenheit."""
    return convert_temperature(T, 'Kelvin', 'Fahrenheit')


def K2C(T):
    """Kelvin -> Celsius."""
    return convert_temperature(T, 'Kelvin', 'Celsius')
import scipy.constants as sc
def interpolate_table(target, index, xquantity, yquantity):
    """Linearly interpolate y(target) between table rows index and index+1."""
    x0, x1 = xquantity[index], xquantity[index + 1]
    y0, y1 = yquantity[index], yquantity[index + 1]
    slope = (y1 - y0) / (x1 - x0)
    return y0 + slope * (target - x0)
class Fluid(object):
    """Thermodynamic properties of a fluid at 1 atm, interpolated from tables.

    How to:
        from NewLibraries import thermodynamics as thermo
        fluid_of_interest = thermo.Fluid(material, T)
    where material can be 'air', 'water', 'argon' or 'krypton' and the
    temperature T is in Kelvin.

    Valid ranges (1 atm): air -150 C..400 C, water 274..373 K,
    argon 100..700 K, krypton 150..740 K. Argon, krypton and water tables
    were obtained through http://webbook.nist.gov/chemistry/fluid/

    Attributes set on success:
        beta   thermal expansion coefficient
        rho    density
        Cp     specific heat
        mu     dynamic viscosity
        k      thermal conductivity
        nu     kinematic viscosity
        alpha  thermal diffusivity
        Pr     Prandtl number
    """

    def __init__(self, name, T, P=101325.01):
        self.name = name
        self.T = T
        self.P = P
        if P != 101325.01:
            print("All available tables are for P=1ATM, reverting to P=101325.01Pa")
            self.P = 101325.01
        if self.name == 'water':
            if T < 274 or T > 373:
                print("Temperature is out of bounds for liquid water")
                return
            Ttab, ptab, rhotab, Cptab, mutab, ktab = \
                np.genfromtxt('Tables/water1atm.csv', delimiter=',',
                              skip_header=1, unpack=True, dtype=float)
            Ntab = len(Ttab)
            Cptab *= 1e3  # presumably kJ/(kg.K) -> J/(kg.K) -- TODO confirm table units
            nutab = mutab / rhotab
            alphatab = ktab / (rhotab * Cptab)
            Prtab = nutab / alphatab
            dTtab = Ttab[1] - Ttab[0]
            # compute beta from -(1/rho)(d rho/dT)
            betatab = -(1. / rhotab) * np.gradient(rhotab) / dTtab
            # Uniformly spaced table: locate the bracketing row directly.
            i = int((T - Ttab[0]) / dTtab)
            if i == Ntab - 1:
                # BUGFIX: was the no-op comparison `i == Ntab - 2`; clamp so
                # interpolate_table never reads past the end of the table.
                i = Ntab - 2
        elif self.name == 'argon':
            if T < 100 or T > 700:
                print("Temperature is out of bounds for argon")
                return
            Ttab, ptab, rhotab, Cptab, mutab, ktab = \
                np.loadtxt('Tables/Argon1atm.csv', delimiter=',',
                           skiprows=1, unpack=True, dtype=float)
            Ntab = len(Ttab)
            Cptab *= 1e3
            nutab = mutab / rhotab
            alphatab = ktab / (rhotab * Cptab)
            Prtab = nutab / alphatab
            dTtab = Ttab[1] - Ttab[0]
            betatab = -(1. / rhotab) * np.gradient(rhotab) / dTtab
            i = int((T - Ttab[0]) / dTtab)
            if i == Ntab - 1:
                i = Ntab - 2  # BUGFIX: was a comparison, not an assignment
        elif self.name == 'krypton':
            if T < 150 or T > 740:
                print("Temperature is out of bounds for krypton")
                return
            Ttab, ptab, rhotab, Cptab, mutab, ktab = \
                np.loadtxt('Tables/Krypton1atm.csv', delimiter=',',
                           skiprows=1, unpack=True, dtype=float)
            Ntab = len(Ttab)
            Cptab *= 1e3
            nutab = mutab / rhotab
            alphatab = ktab / (rhotab * Cptab)
            Prtab = nutab / alphatab
            dTtab = Ttab[1] - Ttab[0]
            betatab = -(1. / rhotab) * np.gradient(rhotab) / dTtab
            i = int((T - Ttab[0]) / dTtab)
            if i == Ntab - 1:
                i = Ntab - 2  # BUGFIX: was a comparison, not an assignment
        elif self.name == 'air':
            if T < C2K(-150.) or T > C2K(400.):
                print("Temperature is out of bounds of the table for air")
                return
            Ttab, rhotab, Cptab, ktab, nutab, betatab, Prtab = \
                np.genfromtxt('Tables/air1atm.csv', delimiter=',',
                              skip_header=1, unpack=True, dtype=float)
            Ntab = len(Ttab)
            Ttab = C2K(Ttab)
            Cptab *= 1e3
            nutab *= 1e-6  # presumably table stores nu * 1e6 -- TODO confirm units
            mutab = rhotab * nutab
            alphatab = ktab / (rhotab * Cptab)
            Prtab = nutab / alphatab
            # Non-uniform spacing: scan for the bracketing row.
            # BUGFIX: check the bound before indexing Ttab.
            i = 0
            while i < Ntab and Ttab[i] < T:
                i += 1
            i -= 1
            # Clamp so [i, i+1] is always a valid bracket: T at the first
            # table row previously yielded i = -1 (wrap-around indexing).
            i = max(i, 0)
            if i == Ntab - 1:
                i = Ntab - 2
        else:
            print("warning, no table available for", self.name)
            return
        # Linearly interpolate every property at T.
        self.rho = interpolate_table(T, i, Ttab, rhotab)
        self.Cp = interpolate_table(T, i, Ttab, Cptab)
        self.mu = interpolate_table(T, i, Ttab, mutab)
        self.k = interpolate_table(T, i, Ttab, ktab)
        self.nu = interpolate_table(T, i, Ttab, nutab)
        self.alpha = interpolate_table(T, i, Ttab, alphatab)
        self.Pr = interpolate_table(T, i, Ttab, Prtab)
        if (self.name == 'air'):
            # Ideal-gas approximation for the expansion coefficient.
            self.beta = 1. / T
        else:
            self.beta = interpolate_table(T, i, Ttab, betatab)
src/leetcode_978_longest_turbulent_subarray.py | sungho-joo/leetcode2github | 0 | 82692 | # @l2g 978 python3
# [978] Longest Turbulent Subarray
# Difficulty: Medium
# https://leetcode.com/problems/longest-turbulent-subarray
#
# Given an integer array arr, return the length of a maximum size turbulent subarray of arr.
# A subarray is turbulent if the comparison sign flips between each adjacent pair of elements in the subarray.
# More formally,a subarray [arr[i],arr[i + 1],...,
# arr[j]] of arr is said to be turbulent if and only if:
#
# For i <= k < j:
#
#
# arr[k] > arr[k + 1] when k is odd, and
# arr[k] < arr[k + 1] when k is even.
#
#
# Or, for i <= k < j:
#
# arr[k] > arr[k + 1] when k is even, and
# arr[k] < arr[k + 1] when k is odd.
#
#
#
#
# Example 1:
#
# Input: arr = [9,4,2,10,7,8,8,1,9]
# Output: 5
# Explanation: arr[1] > arr[2] < arr[3] > arr[4] < arr[5]
#
# Example 2:
#
# Input: arr = [4,8,12,16]
# Output: 2
#
# Example 3:
#
# Input: arr = [100]
# Output: 1
#
#
# Constraints:
#
# 1 <= arr.length <= 4 * 10^4
# 0 <= arr[i] <= 10^9
#
#
from typing import List
class Solution:
    def maxTurbulenceSize(self, arr: List[int]) -> int:
        """Length of the longest subarray whose adjacent comparisons alternate."""
        if len(arr) == 1:
            return 1
        # prev_rel encodes arr[i-1] vs arr[i]: 0 = '>', 1 = '<', 2 = '=='.
        longest = 1 + int(arr[1] != arr[0])
        current = longest
        prev_rel = 2
        for i in range(1, len(arr)):
            a, b = arr[i - 1], arr[i]
            rising = a < b
            falling = a > b
            # The run extends only when the comparison flips direction.
            if (prev_rel == 0 and rising) or (prev_rel == 1 and falling):
                current += 1
            else:
                longest = max(longest, current)
                current = 1 + int(a != b)
            if rising:
                prev_rel = 1
            elif falling:
                prev_rel = 0
            else:
                prev_rel = 2
        return max(longest, current)
if __name__ == "__main__":
    # Run this problem's test module via pytest when executed directly.
    import os

    import pytest

    pytest.main([os.path.join("tests", "test_978.py")])
| 3 | 3 |
benchipfs/ipfs_benchmark_server.py | chunchuan-wang/droplet-engine | 10 | 82820 | <reponame>chunchuan-wang/droplet-engine<gh_stars>1-10
#© 2017-2020, ETH Zurich, D-INFK, <EMAIL>
from flask import Flask
from flask import request, g
from ipfs_util import run_ipfs_load
from do_benchmark import store_chunks
import json
import argparse
# Output file name for the recorded per-fetch timings.
FILE = "bench"

app = Flask("IPFS_Benchmark_Server")
@app.route('/ipfs_bench', methods=['POST'])
def run_benchmark():
    """POST /ipfs_bench: body is newline-separated IPFS addresses.

    Fetches every address via run_ipfs_load, writes the per-fetch timings
    (one per line) to the bench file, and returns them as a JSON list.
    Any failure yields a 400.

    NOTE(review): `print e` is Python 2 syntax; this module cannot run
    under Python 3 as written.
    """
    addresses = str(request.get_data())
    try:
        data = addresses.splitlines()
        times = run_ipfs_load(data)
        # Persist one timing per line for offline analysis.
        with open("%s" % (FILE,), 'w') as file:
            for time in times:
                file.write("%f\n" % time)
        return json.dumps(times)
    except Exception as e:
        print e
        return "ERROR", 400
if __name__ == "__main__":
    # CLI entry point: bind address/port are configurable; defaults serve
    # on all interfaces at port 12000.
    parser = argparse.ArgumentParser("Run Basic IPFS bench Server")
    parser.add_argument('--port', type=int, help='port', default=12000, required=False)
    parser.add_argument('--ip', type=str, help='ip', default="0.0.0.0", required=False)
    args = parser.parse_args()
    app.run(host=args.ip, port=args.port)
main.py | narthur/beeminder-traffic-light | 2 | 82948 | import RPi.GPIO as GPIO
import time
from pyminder.beeminder import Beeminder
import os
import yaml
import time
import datetime
# Load config.yaml from the script's directory and authenticate the
# Beeminder client once at import time.
base_dir = os.path.dirname(os.path.realpath(__file__))
config = yaml.load(open(f"{base_dir}/config.yaml", "r"), Loader=yaml.FullLoader)

bm = Beeminder()
bm.set_username(config['beeminder']['user'])
bm.set_token(config['beeminder']['token'])
def tick():
    """One polling cycle: light the LED matching the most urgent goal."""
    lose_date = get_lose_date()
    pin = get_pin(lose_date)
    interval = get_interval(lose_date)
    power_light(pin, interval)
def get_lose_date():
    """Return the earliest derailment timestamp across all Beeminder goals."""
    goals = bm.get_goals()
    lose_dates = {g['slug']: g['losedate'] for g in goals}
    # Most urgent goal = smallest losedate; print its slug for debugging.
    key = min(lose_dates, key=lose_dates.get)
    print(key)
    return lose_dates[key]
def get_pin(lose_date):
    """Map a derailment timestamp to a GPIO pin: 9=red, 10=yellow, 11=green."""
    now = datetime.datetime.today()
    end_of_today = now.replace(hour=23, minute=59, second=59).timestamp()
    end_of_tomorrow = (now + datetime.timedelta(days=1)).replace(
        hour=23, minute=59, second=59).timestamp()
    # Derails today -> red.
    if lose_date <= end_of_today:
        return 9
    # Derails tomorrow -> yellow.
    if lose_date <= end_of_tomorrow:
        return 10
    # Further out -> green.
    return 11
def get_interval(lose_date):
    """Seconds to keep the light on: half the remaining time, capped at 10 min.

    BUGFIX: the previous code computed `remaining` and then divided the
    absolute timestamp instead (`lose_date / 2`), which always exceeded the
    cap; the intent is half the time left until derailment.
    """
    now = time.time()
    remaining = lose_date - now
    interval = remaining / 2
    return min(interval, 60 * 10)
def power_light(pin, seconds):
    """Blink the LED on *pin* (1s on / 1s off) for roughly *seconds* seconds."""
    # Ignore anything that is not one of the traffic-light pins.
    if pin not in [9, 10, 11]:
        return
    # Each iteration covers 2 seconds (1 on + 1 off).
    for x in range(0, int(seconds / 2)):
        GPIO.output(pin, True)
        time.sleep(1)
        GPIO.output(pin, False)
        time.sleep(1)
# Pin Setup:
GPIO.setmode(GPIO.BCM)  # Broadcom pin-numbering scheme. This uses the pin numbers that match the pin numbers on the Pi Traffic light.
GPIO.setup(9, GPIO.OUT)   # Red LED pin set as output
GPIO.setup(10, GPIO.OUT)  # Yellow LED pin set as output
GPIO.setup(11, GPIO.OUT)  # Green LED pin set as output

# Poll forever; always release the GPIO pins on exit.
try:
    while True:
        tick()
except KeyboardInterrupt:
    print('Interrupted')
finally:
    GPIO.cleanup()
math/0x04-convolutions_and_pooling/test/5-main.py | cbarros7/holbertonschool-machine_learning | 1 | 83076 | <gh_stars>1-10
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
# The module name starts with a digit, so it must be loaded via __import__.
convolve = __import__('5-convolve').convolve

if __name__ == '__main__':
    # Load the sample images, apply three 3x3x3 kernels with 'valid'
    # padding, then display the original image and each output channel.
    dataset = np.load('../../supervised_learning/data/animals_1.npz')
    images = dataset['data']
    print(images.shape)
    kernels = np.array([[[[0, 1, 1], [0, 1, 1], [0, 1, 1]], [[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], [[0, -1, 1], [0, -1, 1], [0, -1, 1]]],
                        [[[-1, 1, 0], [-1, 1, 0], [-1, 1, 0]], [[5, 0, 0], [5, 0, 0], [5, 0, 0]], [[-1, -1, 0], [-1, -1, 0], [-1, -1, 0]]],
                        [[[0, 1, -1], [0, 1, -1], [0, 1, -1]], [[-1, 0, -1], [-1, 0, -1], [-1, 0, -1]], [[0, -1, -1], [0, -1, -1], [0, -1, -1]]]])
    images_conv = convolve(images, kernels, padding='valid')
    print(images_conv.shape)
    plt.imshow(images[0])
    plt.show()
    plt.imshow(images_conv[0, :, :, 0])
    plt.show()
    plt.imshow(images_conv[0, :, :, 1])
    plt.show()
    plt.imshow(images_conv[0, :, :, 2])
    plt.show()
main.py | fredmnl/babyfoot | 0 | 83204 | <reponame>fredmnl/babyfoot
import sys
from fastapi import FastAPI
from fastapi.responses import HTMLResponse, FileResponse
app = FastAPI()
import api
@app.get("/api/getPlayers/")
async def read_root():
    """Return the list of known players."""
    return api.getPlayers()
@app.get("/api/getGames/")
async def read_root():
    """Return the list of recorded games."""
    return api.getGames()
@app.get("/api/insertGame/")
async def read_root(blueDefense: str, blueOffense: str, redDefense: str, redOffense: str, blueScore: int, redScore: int):
    """Validate and record a finished game.

    A valid final score has exactly one team at 5 points; all four players
    must be registered and no player may appear on both teams.
    """
    # Exactly one side must have 5 points.
    if (blueScore != 5 and redScore != 5) or (blueScore == 5 and redScore == 5):
        return {"status": "NOK", "reason": f"Invalid score : {blueScore}-{redScore}"}
    knownPlayers = [player["name"] for player in api.getPlayers()]
    for player in [blueDefense, blueOffense, redDefense, redOffense]:
        if player not in knownPlayers:
            return {"status": "NOK", "reason": f"Unknown player: {player}"}
    # NOTE(review): the same player twice on ONE team (e.g. blueDefense ==
    # blueOffense) is accepted -- confirm whether that is intentional.
    if blueDefense in [redDefense, redOffense]:
        return {"status": "NOK", "reason": f"Invalid teams: {blueDefense} cannot be in both teams"}
    if blueOffense in [redDefense, redOffense]:
        return {"status": "NOK", "reason": f"Invalid teams: {blueOffense} cannot be in both teams"}
    result = api.insertGame(blueDefense, blueOffense, redDefense, redOffense, blueScore, redScore)
    result["status"] = "OK"
    return result
@app.get("/api/addPlayer/")
async def read_root(player: str):
    """Register a new player name; reject duplicates."""
    # The comprehension's `player` is scoped to the comprehension only; the
    # parameter is not shadowed outside it.
    knownPlayers = [player["name"] for player in api.getPlayers()]
    if player in knownPlayers:
        return {"status": "NOK", "reason": f"Player already known: {player}"}
    api.addPlayer(player)
    return {"status": "OK"}
| 1.632813 | 2 |
orc8r/gateway/python/magma/magmad/upgrade/feg_upgrader.py | QiuYulong/magma | 3 | 83332 | """
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import asyncio
import logging
import pathlib
import os
import shutil
from magma.common.service import MagmaService
from magma.magmad.upgrade.magma_upgrader import compare_package_versions
from magma.magmad.upgrade.upgrader import UpgraderFactory
from magma.magmad.upgrade.upgrader2 import ImageNameT, run_command, \
UpgradeIntent, Upgrader2, VersionInfo, VersionT
# Where downloaded FeG images are unpacked, and the install script they ship.
IMAGE_INSTALL_DIR = '/var/cache/magma_feg'
IMAGE_INSTALL_SCRIPT = IMAGE_INSTALL_DIR + '/install.sh'
class FegUpgrader(Upgrader2):
    """
    Downloads and installs the federation gateway images
    """

    def version_to_image_name(self, version: VersionT) -> ImageNameT:
        """
        Returns the image format from the version string.
        (i.e) 0.3.68-1541626353-d1c29db1 -> magma_feg_d1c29db1.zip
        """
        parts = version.split("-")
        if len(parts) != 3:
            raise ValueError("Unknown version format: %s" % version)
        return ImageNameT("magma_feg_%s.zip" % parts[2])

    async def get_upgrade_intent(self) -> UpgradeIntent:
        """
        Returns the desired version for the gateway.
        We don't support downgrading, and so checks are made to update
        only if the target version is higher than the current version.
        """
        tgt_version = self.service.mconfig.package_version
        curr_version = self.service.version
        # "0.0.0-0" is treated as "no target set"; otherwise refuse any
        # target that is not strictly newer than the current version.
        if (tgt_version == "0.0.0-0" or
                compare_package_versions(curr_version, tgt_version) <= 0):
            tgt_version = curr_version
        return UpgradeIntent(stable=VersionT(tgt_version), canary=VersionT(""))

    async def get_versions(self) -> VersionInfo:
        """ Returns the current version """
        return VersionInfo(
            current_version=self.service.version,
            available_versions=set(),
        )

    async def prepare_upgrade(
        self, version: VersionT, path_to_image: pathlib.Path
    ) -> None:
        """ No-op for the feg upgrader """
        return

    async def upgrade(
        self, version: VersionT, path_to_image: pathlib.Path
    ) -> None:
        """ Time to actually upgrade the Feg using the image """
        # Recreate the install directory from scratch so no stale files
        # from a previous install survive, then unpack and run the
        # image's own install script.
        shutil.rmtree(IMAGE_INSTALL_DIR, ignore_errors=True)
        os.mkdir(IMAGE_INSTALL_DIR)
        await run_command("unzip", str(path_to_image), "-d", IMAGE_INSTALL_DIR)
        logging.info("Running image install script: %s", IMAGE_INSTALL_SCRIPT)
        await run_command(IMAGE_INSTALL_SCRIPT)
class FegUpgraderFactory(UpgraderFactory):
    """ Returns an instance of the FegUpgrader """

    def create_upgrader(
        self,
        magmad_service: MagmaService,
        loop: asyncio.AbstractEventLoop,
    ) -> FegUpgrader:
        # The event loop is part of the factory interface but unused here.
        return FegUpgrader(magmad_service)
| 1.53125 | 2 |
projects/mars/model_classes/UrineProcessorAssembly.py | ModelFlow/modelflow | 6 | 83460 | <reponame>ModelFlow/modelflow<gh_stars>1-10
class UrineProcessorAssembly:
    """Hourly model of the ISS-style urine processor: turns urine into
    unfiltered water while drawing DC power."""

    name = "Urine Processor Assembly"
    params = [
        {
            "key": "max_urine_consumed_per_hour",
            "label": "",
            "units": "kg/hr",
            "private": False,
            "value": 0.375,
            "confidence": 0,
            "notes": "9 kg/day / 24 per wikipedia",
            "source": "https://en.wikipedia.org/wiki/ISS_ECLSS"
        },
        {
            "key": "min_urine_consumed_per_hour",
            "label": "",
            "units": "kg/hr",
            "private": False,
            "value": 0.1,
            "confidence": 0,
            "notes": "",
            "source": "fake"
        },
        {
            "key": "dc_kwh_consumed_per_hour",
            "label": "",
            "units": "kwh",
            "private": False,
            "value": 1.501,
            "confidence": 0,
            "notes": "TODO: Should be per kg input",
            "source": "https://simoc.space/wp-content/uploads/2020/06/simoc_agent_currencies-20200601.pdf"
        },
        {
            "key": "efficiency",
            "label": "",
            "units": "decimal %",
            "private": False,
            "value": 0.85,
            "confidence": 0,
            "notes": "Not sure if this is accurate",
            "source": "https://en.wikipedia.org/wiki/ISS_ECLSS"
        },
        {
            "key": "mass",
            "label": "",
            "units": "kg",
            "private": False,
            "value": 193.3,
            "confidence": 0,
            "notes": "",
            "source": "https://simoc.space/wp-content/uploads/2020/06/simoc_agent_currencies-20200601.pdf"
        },
        {
            "key": "volume",
            "label": "",
            "units": "m3",
            "private": False,
            "value": 0.39,
            "confidence": 0,
            "notes": "",
            "source": "https://simoc.space/wp-content/uploads/2020/06/simoc_agent_currencies-20200601.pdf"
        }
    ]
    states = []

    @staticmethod
    def run_step(states, params, utils):
        """Advance one hour: consume urine and DC power, produce water."""
        # Too little input or too little power: do nothing this step.
        if states.urine < params.min_urine_consumed_per_hour:
            return
        if states.available_dc_kwh < params.dc_kwh_consumed_per_hour:
            return
        processed = min(states.urine, params.max_urine_consumed_per_hour)
        power_draw = min(states.available_dc_kwh, params.dc_kwh_consumed_per_hour)
        states.urine -= processed
        states.available_dc_kwh -= power_draw
        states.unfiltered_water += processed
| 1.390625 | 1 |
{{cookiecutter.project_slug}}/app/auth/auth.py | elben10/cookiecutter-dash-full-stack | 4 | 83588 | from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from six import add_metaclass, iteritems
@add_metaclass(ABCMeta)
class Auth(object):
    """Abstract base for Dash authentication schemes.

    Wraps the Dash app's Flask view functions so that every request is
    routed through the concrete subclass's auth wrappers.
    """

    def __init__(
        self, app, authorization_hook=None, _overwrite_index=True, protect_assets=False
    ):
        self.app = app
        self._index_view_name = app.config["routes_pathname_prefix"]
        if _overwrite_index:
            self._overwrite_index()
        self._protect_views(protect_assets=protect_assets)
        self._index_view_name = app.config["routes_pathname_prefix"]
        self._auth_hooks = [authorization_hook] if authorization_hook else []

    def _overwrite_index(self):
        # Swap the index view for the subclass's index auth wrapper.
        original_index = self.app.server.view_functions[self._index_view_name]
        self.app.server.view_functions[self._index_view_name] = self.index_auth_wrapper(
            original_index
        )

    def _protect_views(self, protect_assets=False):
        # TODO - allow users to white list in case they add their own views
        # Wrap every non-index view; the static-assets view is skipped
        # unless protect_assets is set.
        for view_name, view_method in iteritems(self.app.server.view_functions):
            if view_name != self._index_view_name:
                if not protect_assets:
                    if view_name == "_dash_assets.static":
                        continue
                self.app.server.view_functions[view_name] = self.auth_wrapper(
                    view_method
                )

    def is_authorized_hook(self, func):
        """Decorator: register an additional authorization hook."""
        self._auth_hooks.append(func)
        return func

    @abstractmethod
    def is_authorized(self):
        """Return whether the current request is authorized."""
        pass

    @abstractmethod
    def auth_wrapper(self, f):
        """Wrap a regular view function with the auth check."""
        pass

    @abstractmethod
    def index_auth_wrapper(self, f):
        """Wrap the index view function with the auth check."""
        pass

    @abstractmethod
    def login_request(self):
        """Produce the response that asks the client to authenticate."""
        pass
pub_site/src/pub_site/notify/task.py | webee/pay | 1 | 83716 | # coding=utf-8
from __future__ import unicode_literals
from datetime import datetime, timedelta
from pub_site.constant import WithdrawState
from pub_site.withdraw import dba as withdraw_dba
from pub_site import pay_client
from pub_site.sms import sms
from tools.utils import to_bankcard_mask
def fetch_notify_withdraw_result(minutes):
    """Poll withdraw requests older than *minutes* and notify users of results."""
    cutoff = datetime.utcnow() - timedelta(minutes=minutes)
    for record in withdraw_dba.get_requested_withdraw_record_before(cutoff):
        data = pay_client.query_withdraw(record.user_id, record.sn)
        if data is None:
            # Query failed or no result yet; try again on the next run.
            continue
        outcome = is_withdraw_result_success(data['code'])
        if outcome is not None:
            notify_user_withdraw_result(outcome, record)
def is_withdraw_result_success(code):
    """Map a gateway result code to True (success) / False (failure).

    Returns None when the code is neither a known success nor failure code,
    meaning the result is still inconclusive.
    """
    if code in (0, '0'):
        return True
    if code in (1, '1'):
        return False
    return None
def notify_user_withdraw_result(is_success, withdraw_record):
    """SMS the user about the withdraw outcome; persist the new state when sent."""
    text = _build_msg(is_success, withdraw_record)
    delivered = sms.send(withdraw_record.phone_no, text)
    if not delivered:
        # Retry once on failure. TODO: move to celery.
        delivered = sms.send(withdraw_record.phone_no, text)
    if delivered:
        state = WithdrawState.SUCCESS if is_success else WithdrawState.FAILED
        withdraw_dba.update_withdraw_state(withdraw_record.sn, withdraw_record.user_id, state)
        return True
def _build_msg(is_success, withdraw_record):
    """Render the SMS text describing the outcome of a withdraw request."""
    bankcard = pay_client.app_get_user_bankcard(
        withdraw_record.user_id, withdraw_record.bankcard_id)
    context = {
        'created_on': withdraw_record.created_on,
        'amount': withdraw_record.amount,
        'bank_name': bankcard['bank_name'],
        'card_no': to_bankcard_mask(bankcard['card_no']),
    }
    if not is_success:
        template = "您于{created_on}提现{amount}到{bank_name}({card_no})的请求失败。"
        return template.format(**context)
    context['actual_amount'] = withdraw_record.actual_amount
    context['fee'] = withdraw_record.fee
    template = "您于{created_on}提现{amount}到{bank_name}({card_no})的请求已处理,实际金额: {actual_amount}, 手续费: {fee}; 正等待到账,请留意银行卡到账信息。"
    return template.format(**context)
| 1.492188 | 1 |
ietf/group/admin.py | MatheusProla/Codestand | 2 | 83844 | from functools import update_wrapper
from django.contrib import admin
from django.contrib.admin.utils import unquote
from django.core.exceptions import PermissionDenied
from django.core.management import load_command_class
from django.http import Http404
from django.shortcuts import render
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.translation import ugettext as _
from ietf.group.models import Group, GroupHistory, GroupEvent, GroupURL, GroupMilestone, Role, RoleHistory, ChangeStateGroupEvent
class RoleInline(admin.TabularInline):
    # Inline editor for Role rows on the Group admin page.
    model = Role
    raw_id_fields = ["person", "email"]
class GroupURLInline(admin.TabularInline):
    # Inline editor for GroupURL rows on the Group admin page.
    model = GroupURL
class GroupAdmin(admin.ModelAdmin):
    """Admin for Group with inline roles/URLs and custom SDO-reminder views.

    NOTE(review): role_list.allow_tags, force_unicode and self.queryset(...)
    are legacy Django admin APIs (pre-1.6-era); confirm the project's Django
    version before modernizing.
    """
    list_display = ["acronym", "name", "type", "state", "time", "role_list"]
    list_display_links = ["acronym", "name"]
    list_filter = ["type", "state", "time"]
    search_fields = ["acronym", "name"]
    ordering = ["name"]
    raw_id_fields = ["charter", "parent"]
    inlines = [RoleInline, GroupURLInline]
    prepopulated_fields = {"acronym": ("name", )}
    def role_list(self, obj):
        # Render the group's roles as HTML links to person/role admin pages.
        # NOTE(review): the second anchor is never closed ("%s)" lacks </a>) -
        # looks like a markup bug; confirm intended output before fixing.
        roles = Role.objects.filter(group=obj).order_by("name", "person__name").select_related('person')
        res = []
        for r in roles:
            res.append(u'<a href="../../person/person/%s/">%s</a> (<a href="../../group/role/%s/">%s)' % (r.person.pk, escape(r.person.plain_name()), r.pk, r.name.name))
        return ", ".join(res)
    role_list.short_description = "Persons"
    role_list.allow_tags = True
    # SDO reminder
    def get_urls(self):
        # Extend the default admin URLs with the two reminder views below,
        # each wrapped so the normal admin permission machinery applies.
        from ietf.utils.urls import url
        def wrap(view):
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)
        info = self.model._meta.app_label, self.model._meta.model_name
        urls = [
            url(r'^reminder/$', wrap(self.send_reminder), name='%s_%s_reminder' % info),
            url(r'^(.+)/reminder/$', wrap(self.send_one_reminder), name='%s_%s_one_reminder' % info),
        ]
        urls += super(GroupAdmin, self).get_urls()
        return urls
    def send_reminder(self, request, sdo=None):
        # Render the reminder page; on POST actually run the management
        # command that mails the SDO-list update reminders.
        opts = self.model._meta
        app_label = opts.app_label
        output = None
        sdo_pk = sdo and sdo.pk or None
        if request.method == 'POST' and request.POST.get('send', False):
            command = load_command_class('ietf.liaisons', 'remind_update_sdo_list')
            output=command.handle(return_output=True, sdo_pk=sdo_pk)
            output='\n'.join(output)
        context = {
            'opts': opts,
            'has_change_permission': self.has_change_permission(request),
            'app_label': app_label,
            'output': output,
            'sdo': sdo,
        }
        return render(request, 'admin/group/group/send_sdo_reminder.html', context )
    def send_one_reminder(self, request, object_id):
        # Permission-checked wrapper around send_reminder for one group.
        model = self.model
        opts = model._meta
        try:
            obj = self.queryset(request).get(pk=unquote(object_id))
        except model.DoesNotExist:
            obj = None
        if not self.has_change_permission(request, obj):
            raise PermissionDenied
        if obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
        return self.send_reminder(request, sdo=obj)
admin.site.register(Group, GroupAdmin)
class GroupHistoryAdmin(admin.ModelAdmin):
    # Read-mostly admin for historical snapshots of groups.
    list_display = ["time", "acronym", "name", "type"]
    list_display_links = ["acronym", "name"]
    list_filter = ["type"]
    search_fields = ["acronym", "name"]
    ordering = ["name"]
    raw_id_fields = ["group", "parent"]
admin.site.register(GroupHistory, GroupHistoryAdmin)
class GroupMilestoneAdmin(admin.ModelAdmin):
    # Admin for group milestones; docs is an M2M, hence the raw-id widget.
    list_display = ["group", "desc", "due", "resolved", "time"]
    search_fields = ["group__name", "group__acronym", "desc", "resolved"]
    raw_id_fields = ["group", "docs"]
admin.site.register(GroupMilestone, GroupMilestoneAdmin)
class RoleAdmin(admin.ModelAdmin):
    # Shared admin configuration for both Role and RoleHistory (see below).
    list_display = ["name", "person", "email", "group"]
    list_display_links = ["name"]
    search_fields = ["name__name", "person__name", "email__address"]
    list_filter = ["name", "group"]
    ordering = ["id"]
    raw_id_fields = ["email", "person", "group"]
admin.site.register(Role, RoleAdmin)
admin.site.register(RoleHistory, RoleAdmin)
class GroupEventAdmin(admin.ModelAdmin):
    # Admin for the generic group event log.
    list_display = ["id", "group", "time", "type", "by", ]
    search_fields = ["group__name", "group__acronym"]
admin.site.register(GroupEvent, GroupEventAdmin)
class ChangeStateGroupEventAdmin(admin.ModelAdmin):
    # Admin for state-change events; adds state to display and filters.
    list_display = ["id", "group", "state", "time", "type", "by", ]
    list_filter = ["state", "time", ]
    search_fields = ["group__name", "group__acronym"]
admin.site.register(ChangeStateGroupEvent, ChangeStateGroupEventAdmin)
| 1.28125 | 1 |
pietoolbelt/pipeline/stratification.py | PiePline/PieToolbelt | 1 | 83972 | import json
from multiprocessing import Pool
from random import randint
from typing import List, Dict, Callable, Any
import numpy as np
import os
from tqdm import tqdm
from pietoolbelt.datasets.common import BasicDataset
from pietoolbelt.pipeline.abstract_step import AbstractStep, DatasetInPipeline, AbstractStepDirResult
class StratificationResult(AbstractStepDirResult):
    """Pipeline-step result directory storing per-fold dataset indices.

    Fold indices are flushed as '<name>.npy' files next to a 'meta.json'
    that records the number of indices per fold.
    """
    def __init__(self, path: str):
        super().__init__(path)
        self._meta_file = os.path.join(path, 'meta.json')
        # Resume existing metadata if the result dir was used before.
        if os.path.exists(self._meta_file):
            with open(self._meta_file, 'r') as meta_file:
                self._meta = json.load(meta_file)
        else:
            self._meta = dict()
        # Map a fold name to its '.npy' file name / full path.
        # NOTE(review): self._path is presumably set by AbstractStepDirResult
        # from `path` - confirm in the base class.
        self._name2file = lambda name: name + '.npy' if len(name) < 4 or name[-4:] != '.npy' else name
        self._name2path = lambda name: os.path.join(self._path, self._name2file(name))
    def add_indices(self, indices: List[np.uint], name: str, dataset: BasicDataset):
        # Flush the dataset restricted to `indices` and record the fold size.
        dataset.set_indices(indices).flush_indices(self._name2path(name))
        self._meta[name] = {'indices_num': len(indices)}
        with open(self._meta_file, 'w') as meta_file:
            json.dump(self._meta, meta_file)
    def get_folds(self) -> List[str]:
        # Fold names are exactly the keys recorded in meta.json.
        return list(self._meta.keys())
    def get_indices(self, name: str) -> List[np.ndarray]:
        # NOTE(review): np.load returns a single ndarray although the
        # annotation says List[np.ndarray] - confirm callers before changing.
        file_path = os.path.join(self._path, self._name2file(name))
        if not os.path.exists(file_path):
            raise RuntimeError('Indices file doesnt exists [{}]'.format(file_path))
        return np.load(file_path)
    def get_output_paths(self) -> List[str]:
        # The whole result directory is this step's single output.
        return [self._path]
class DatasetStratification:
    """Split a dataset into stratified parts based on per-item target labels.

    Labels are computed by `calc_target_label` (optionally in a worker pool),
    binned into a histogram, and each requested part draws items randomly so
    the per-class proportions are preserved. Results are flushed through a
    StratificationResult.
    """
    def __init__(self, dataset: BasicDataset, calc_target_label: Callable[[Any], Any], result: StratificationResult, workers_num: int = 0):
        self._dataset = dataset
        self._calc_label = calc_target_label
        self._progress_clbk = None
        self._workers_num = workers_num
        self._result = result
    @staticmethod
    def __fill_hist(target_hist: [], indices: {}):
        # For each class `idx`, randomly draw target_hist[idx] item indices
        # without replacement (entries are deleted from `indices` in place).
        def pick(d):
            idx = randint(0, len(indices[d]) - 1)
            res = indices[d][idx]
            del indices[d][idx]
            return res
        res = {}
        for idx, d in enumerate(target_hist):
            idxes = []
            for _ in range(d):
                idxes.append(pick(idx))
            res[idx] = idxes
        return res
    def calc_hist(self, dataset: BasicDataset):
        # Compute one label per item (pooled if workers_num > 1) and bucket
        # item indices by label.
        # NOTE(review): `hist[idxes - 1]` assumes labels are 1-based positive
        # integers - confirm against calc_target_label implementations.
        labels = []
        if self._workers_num > 1:
            with Pool(self._workers_num) as pool, tqdm(total=len(dataset)) as pbar:
                for label in pool.imap(self._calc_label, dataset.get_items(), chunksize=self._workers_num * 10):
                    labels.append(label)
                    pbar.update()
        else:
            for d in tqdm(dataset.get_items(), total=len(dataset)):
                labels.append(self._calc_label(d))
        hist = [[] for _ in range(max(labels))]
        for i, idxes in enumerate(labels):
            hist[idxes - 1].append(i)
        return np.array([len(v) for v in hist]), hist
    def cal_multi_hist(self, dataset: BasicDataset):
        # Two-component labels: column 0 is treated as a categorical class,
        # column 1 as a continuous value that is binned by percentiles.
        # The returned histogram is the joint (class x bin) bucketing.
        labels = []
        if self._workers_num > 1:
            with Pool(self._workers_num) as pool, tqdm(total=len(dataset)) as pbar:
                for label in pool.imap(self._calc_label, dataset.get_items(), chunksize=self._workers_num * 10):
                    labels.append(label)
                    pbar.update()
        else:
            for d in tqdm(dataset.get_items(), total=len(dataset)):
                labels.append(self._calc_label(d))
        # Keep every other percentile edge (odd positions) as bin boundaries.
        percent = np.percentile(np.array(labels)[:, 1], np.linspace(0, 100, 10)).tolist()
        out_p = []
        for p in percent:
            if percent.index(p) % 2 != 0:
                out_p.append(p)
        hist_1 = [[] for _ in range(int(max(np.array(labels)[:, 0])) + 1)]
        for i, idxes in enumerate(labels):
            hist_1[int(idxes[0])].append(i)
        hist_2 = [[] for _ in range(len(out_p))]
        for i, idxes in enumerate(labels):
            for p in range(len(out_p)):
                if p == 0 and idxes[1] <= out_p[p]:
                    hist_2[p].append(i)
                elif p != 0 and out_p[p - 1] < idxes[1] <= out_p[p]:
                    hist_2[p].append(i)
        hist = [[] for _ in range(len(hist_1) * len(hist_2))]
        # NOTE(review): lambda `z` is unused; left in place intentionally.
        z = lambda x, y: [y.index(h) if x in h else -1 for h in y]
        for i, idxes in enumerate(labels):
            index_h1, index_h2 = self.get_hist_idx(i, hist_1), self.get_hist_idx(i, hist_2)
            if index_h2 == -1 or index_h1 == -1:
                raise Exception("Index error in histograms")
            # NOTE(review): joint bucket computed as h1*h2 - 1; this can
            # collide for different (h1, h2) pairs - confirm intended mapping.
            hist[int(index_h1 * index_h2) - 1].append(i)
        return np.array([len(v) for v in hist]), hist
    def stratificate_dataset(self, hist: np.ndarray, indices: list, parts: [float]) -> []:
        # Draw each part's share per class; the last part receives whatever
        # indices remain after the earlier draws.
        res = []
        for part in parts[:len(parts) - 1]:
            target_hist = (hist.copy() * part).astype(np.uint32)
            res.append([target_hist, self.__fill_hist(target_hist, indices)])
        res.append([np.array([len(i) for i in indices]).astype(np.uint32), {i: v for i, v in enumerate(indices)}])
        return res
    @staticmethod
    def get_hist_idx(x, hist):
        # Return the bucket index containing item x, or -1 if absent.
        res = -1
        for h in hist:
            res = hist.index(h) if x in h else res
        return res
    @staticmethod
    def check_indices_for_intersection(indices: []):
        # Sanity check: parts must be disjoint.
        for i in range(len(indices)):
            for index in indices[i]:
                for other_indices in indices[i + 1:]:
                    if index in other_indices:
                        raise Exception('Indices intersects')
    def balance_classes(self, hist: np.ndarray, indices: {}) -> tuple:
        # Cap the majority class at the combined size of all other classes.
        target_hist = hist.copy()
        target_hist[np.argmax(target_hist)] = np.sum(target_hist[target_hist != target_hist.max()])
        return target_hist, self.__fill_hist(target_hist, indices)
    def _flush_indices(self, indices: [], part_indices: [], path: str):
        # Flatten the per-class draw into original-dataset indices and flush.
        # NOTE(review): the comprehension variable shadows builtin `bin`.
        inner_indices = [part_indices[it] for bin in indices[1].values() for it in bin]
        self._result.add_indices(indices=inner_indices, name=path, dataset=self._dataset)
        return inner_indices
    def run(self, parts: {str: float}, multi_hist=False) -> None:
        """Perform the stratification.

        :param parts: mapping of fold name -> fraction; fractions must sum
            to at most 1.
        :param multi_hist: use the joint (categorical x binned) histogram.
        """
        if sum(parts.values()) > 1:
            raise RuntimeError("Sum of target parts greater than 1")
        parts = [[path, part] for path, part in parts.items()]
        pathes = [p[0] for p in parts]
        parts = [p[1] for p in parts]
        part_indices = {i: i for i in range(len(self._dataset))}
        hist, indices = self.cal_multi_hist(self._dataset) if multi_hist else self.calc_hist(self._dataset)
        stratificated_indices = self.stratificate_dataset(hist, indices, parts)
        indices_to_check = []
        for i, cur_indices in enumerate(stratificated_indices):
            indices_to_check.append(self._flush_indices(cur_indices, part_indices, pathes[i]))
        self._dataset.remove_indices()
        self.check_indices_for_intersection(indices_to_check)
class PipelineDatasetStratification(DatasetStratification, AbstractStep):
    # Pipeline adapter: wires DatasetStratification into the AbstractStep
    # graph, declaring the dataset as input and the result dir as output.
    def __init__(self, dataset: DatasetInPipeline, calc_target_label: callable, result: StratificationResult, workers_num: int = 1):
        DatasetStratification.__init__(self, dataset, calc_target_label, result=result, workers_num=workers_num)
        AbstractStep.__init__(self, input_results=[dataset], output_res=result)
| 1.390625 | 1 |
sample/docs/source/alumni/parse.py | hdknr/flier | 0 | 84100 | <reponame>hdknr/flier<gh_stars>0
from docutils.core import publish_doctree
import sys
from bs4 import BeautifulSoup as Soup
import yaml
import json
def to_yaml(result):
    """Print *result* as block-style YAML with unicode preserved.

    Rewritten from a Python-2-only print statement to the parenthesized
    form, which behaves identically on Python 2 and 3.
    """
    # Round-trip through JSON so nested structures become plain dicts/lists.
    # SECURITY NOTE: yaml.load without an explicit Loader is unsafe on
    # untrusted input; here the input is our own json.dumps output, but
    # consider yaml.safe_load.
    print(yaml.dump(
        yaml.load(json.dumps(result)), default_flow_style=False,
        allow_unicode=True))
def to_json(result):
    """Print *result* as indented JSON without ASCII-escaping.

    Rewritten from a Python-2-only print statement to the parenthesized
    form, which behaves identically on Python 2 and 3.
    """
    print(json.dumps(result, ensure_ascii=False, indent=2))
def to_source(rows):
    """Print *rows* as Python source: a commented header row, then one
    list literal of u"..." strings per data row.

    Rewritten from Python-2-only multi-argument print statements to
    single-string prints, which produce identical output on Python 2 and 3.
    """
    print("# " + ", ".join(['u"%s"' % s for s in rows[0]]))
    for row in rows[1:]:
        print("[ " + ", ".join(['u"%s"' % s for s in row]) + " ],")
# Parse the reStructuredText file given as argv[1], extract the first table's
# body cells via the doctree XML, and print each row as Python source.
doctree = publish_doctree(open(sys.argv[1]).read())
dom = doctree.asdom()
soup = Soup(dom.toprettyxml(encoding='utf8'))
# Collect the text of each <entry>'s first <paragraph> (empty cell -> '').
rows = []
for row in soup.select('tbody row'):
    rows.append([
        e.select('paragraph') and e.select('paragraph')[0].text or ''
        for e in row.select('entry')])
# Build header->value dicts per data row.
# NOTE(review): `result` is only consumed by to_yaml/to_json, which are not
# called here - confirm whether this block is still needed.
result = []
for row in rows[1:]:
    result.append(dict(zip(rows[0], row)))
to_source(rows)
| 1.65625 | 2 |
src/pair_statistics.py | neherlab/SVVC | 3 | 84228 | from create_allele_counts import get_primer_intervals
def pair_counts(sam_fname, paired=False, qual_min=30, max_reads=-1,
                max_isize = 700, VERBOSE = 0,
                fwd_primer_regions = None, rev_primer_regions = None):
    '''Tally per-position nucleotide counts and pairwise co-occurrence counts
    from read pairs in a BAM/SAM file.

    Consecutive primary alignments are treated as mates, merged into one
    pseudo-read (overlap conflicts resolved by base quality), primer-derived
    positions are masked, and both single-position ACGT counts and
    two-position co-counts are accumulated per reference.

    Fixes vs. the original: the one-sided-overhang branches assigned the
    *quality* array to merged_seq (merged_seq=qual2 / qual1); they now use
    seq2 / seq1 as the surrounding code requires.

    :param sam_fname: path of the BAM/SAM file to pile up.
    :param paired: unused; kept for interface compatibility.
    :param qual_min: minimum base quality for a position to be counted.
    :param max_reads: stop after this many pairs (-1 = no limit).
    :param max_isize: skip pairs with insert size above this.
    :param VERBOSE: verbosity level.
    :param fwd_primer_regions: per-reference list of (begin, end) forward
        primer intervals to mask; likewise rev_primer_regions for reverse.
    :returns: (ac, acc) - lists of (refname, counts) and (refname, cocounts),
        where counts is a 4 x L array over ACGT and cocounts maps position
        pairs to {dinucleotide: count}.
    '''
    import numpy as np
    import pysam
    from itertools import combinations
    # Merge-outcome bookkeeping: Left/Right overhang, Normal merge, Error.
    c = {'R':0, 'L':0, 'N':0, 'E':0}
    nuc_alpha = np.array(['A', 'C', 'G', 'T'], dtype='S1')
    # Open BAM or SAM file
    with pysam.Samfile(sam_fname) as samfile:
        ac = []
        acc = []
        refs = {}
        read_count = 0
        # Allocate one count matrix and one co-count dict per reference.
        for nref in range(samfile.nreferences):
            if VERBOSE: print(("allocating for:", samfile.getrname(nref), "length:", samfile.lengths[nref]))
            refs[nref]=samfile.getrname(nref)
            ac.append((samfile.getrname(nref), np.zeros((len(nuc_alpha),samfile.lengths[nref]), dtype =int)))
            acc.append((samfile.getrname(nref), {}))
        while True:
            # find read pairs and skip secondary or supplementary alignments
            try:
                read1 = next(samfile)
                while read1.is_secondary or read1.is_supplementary:
                    read1 = next(samfile)
                read2 = next(samfile)
                while read2.is_secondary or read2.is_supplementary:
                    read2 = next(samfile)
            except:  # StopIteration (or a truncated file) ends the loop
                break
            # Skip unmapped, oversized, same-orientation or mismatched pairs.
            if read1.is_unmapped or read2.is_unmapped or np.abs(read1.isize)>max_isize:
                continue
            if (read1.is_reverse==read2.is_reverse):
                continue
            if (read1.qname!=read2.qname):
                continue
            read_count+=1
            if read_count%1000==0:
                print(read_count)
            if max_reads>0 and read_count>max_reads:
                break
            ref_name = refs[read1.rname]
            # determine which read maps to the 5p and which one the 3p end
            # pull out only positions that map, indels will be ignored in cocounts
            # NOTE(review): np.fromstring is deprecated; switching to
            # np.frombuffer requires encoding pysam's str seq/qual first.
            if read2.is_reverse:
                aln1 = np.array(read1.get_aligned_pairs(matches_only=True))
                aln2 = np.array(read2.get_aligned_pairs(matches_only=True))
                seq1 = np.fromstring(read1.seq, 'S1')[aln1[:,0]]
                qual1 = np.fromstring(read1.qual, np.int8)[aln1[:,0]] - 33
                seq2 = np.fromstring(read2.seq, 'S1')[aln2[:,0]]
                qual2 = np.fromstring(read2.qual, np.int8)[aln2[:,0]] - 33
            else:
                aln2 = np.array(read1.get_aligned_pairs(matches_only=True))
                aln1 = np.array(read2.get_aligned_pairs(matches_only=True))
                seq1 = np.fromstring(read2.seq, 'S1')[aln1[:,0]]
                qual1 = np.fromstring(read2.qual, np.int8)[aln1[:,0]] - 33
                seq2 = np.fromstring(read1.seq, 'S1')[aln2[:,0]]
                qual2 = np.fromstring(read1.qual, np.int8)[aln2[:,0]] - 33
            isize = np.abs(read1.isize)
            L1 = aln1.shape[0]
            L2 = aln2.shape[0]
            ## merge reads
            # allocate vectors
            merged_qual = np.zeros(isize, dtype=int)
            merged_seq = np.zeros(isize, dtype='S1')
            merged_pos = np.zeros((isize,2), dtype=int)
            # handle edge cases where one read in contained in the other,
            # i.e. the 5p read extends for longer than the 3p end of the 3p read
            # This can result for example from quality trimming.
            leftoverhang = aln1[0,1] - aln2[0,1]
            rightoverhang = aln1[-1,1] - aln2[-1,1]
            if leftoverhang>0: # take only the better read2
                merged_pos=aln2
                merged_qual=qual2
                merged_seq=seq2  # BUGFIX: was qual2 (quality assigned as sequence)
                c['L']+=1
            elif rightoverhang>0: # take only the better read1
                merged_pos=aln1
                merged_qual=qual1
                merged_seq=seq1  # BUGFIX: was qual1 (quality assigned as sequence)
                c['R']+=1
            else: # proper merging happens here
                # difference between end of aln1 and beginning of aln2 is overlap on reference
                overlap = max(0, aln1[-1,1] - aln2[0,1]+1)
                c['N']+=1
                # note that the exact coordinates might be off bc of indels
                # but what we are doing is conservate and only mapped positions
                # will be reported
                seg1 = L1 - overlap # end of non-overlap segment
                seg3 = isize - L2 + overlap # beginnning of non-overlap segment
                if seg1>0:
                    merged_pos[:seg1] = aln1[:seg1]
                    merged_qual[:seg1] = qual1[:seg1]
                    merged_seq[:seg1] = seq1[:seg1]
                else:
                    seg1=0
                merged_pos[seg3:] = aln2[overlap:]
                merged_qual[seg3:] = qual2[overlap:]
                merged_seq[seg3:] = seq2[overlap:]
                if overlap:
                    # In the overlap, keep whichever mate has the higher base
                    # quality, but only where both agree on sequence and
                    # reference position.
                    try:
                        seq_agree = (seq1[seg1:]==seq2[:overlap])&(aln1[seg1:,1]==aln2[:overlap,1])
                        better = qual1[seg1:]<qual2[:overlap]
                        from1 = np.where(seq_agree&better)[0]
                        from2 = np.where(seq_agree&(~better))[0]
                        merged_pos[seg1 + from1] = aln1[seg1 + from1]
                        merged_qual[seg1 + from1] = qual1[seg1 + from1]
                        merged_seq[seg1+from1] = seq1[seg1+from1]
                        merged_pos[seg1 + from2] = aln2[from2]
                        merged_qual[seg1 + from2] = qual2[from2]
                        merged_seq[seg1+from2] = seq2[from2]
                    except:  # shape mismatches from indels: count and skip pair
                        c['E']+=1
                        continue
            # mask regions in the merged read that likely derive from primer sequence
            not_primer = np.ones_like(merged_seq, 'bool')
            if rev_primer_regions:
                read_end = merged_pos[-1,1]
                for b,e in rev_primer_regions[ref_name]:
                    p_length = e-b
                    if read_end-b>0 and read_end-b<p_length:
                        not_primer[-(read_end-b):]=False
                        break
            if fwd_primer_regions:
                read_start = merged_pos[0,1]
                for b,e in fwd_primer_regions[ref_name]:
                    p_length = e-b
                    if read_start-b>0 and read_start-b<p_length:
                        not_primer[:e-read_start]=False
                        break
            counts = ac[read1.rname][1]
            cocounts = acc[read1.rname][1]
            # Keep only high-quality, non-primer positions.
            good_ind = (merged_qual>qual_min)&not_primer
            for ni,nuc in enumerate(nuc_alpha):
                correct_state = merged_seq==nuc
                counts[ni,merged_pos[correct_state&good_ind,1]] += 1
            # Accumulate dinucleotide co-counts over all position pairs.
            combo = list(zip(merged_pos[good_ind], merged_seq[good_ind]))
            for (p1, n1), (p2,n2) in combinations(combo, 2):
                posp = (p1[1], p2[1])
                p = n1+n2
                if posp not in cocounts:
                    cocounts[posp]={p:1}
                    continue
                if p not in cocounts[posp]:
                    cocounts[posp][p]=1
                else:
                    cocounts[posp][p]+=1
    return ac, acc
if __name__ == '__main__':
    # CLI entry point: pile up a BAM file and pickle the (counts, cocounts)
    # result into <out_dir>/pair_counts.pkl.gz.
    import argparse, gzip
    import pickle as pickle
    parser = argparse.ArgumentParser(description='create pair counts',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--bam_file',
                        help='bam file to pile up')
    parser.add_argument('--out_dir',
                        help='directory to save results')
    parser.add_argument('--max_reads', type=int,default=-1,
                        help='maximum number of reads to process')
    parser.add_argument('--primers', type=str, help='file with primers to mask in pile up')
    args = parser.parse_args()
    fwd_primer_intervals, rev_primer_intervals = get_primer_intervals(args.primers)
    print((fwd_primer_intervals, rev_primer_intervals))
    ac, acc = pair_counts(args.bam_file, qual_min=30, VERBOSE=3, max_isize = 600, paired=True, max_reads=args.max_reads,
                          fwd_primer_regions = fwd_primer_intervals, rev_primer_regions = rev_primer_intervals)
    # Replace '/' in reference names so downstream file names derived from
    # them do not create unintended sub-directories.
    acc_renamed = []
    for refname, counts in acc:
        acc_renamed.append((refname.replace('/', '_'), counts))
    acc = acc_renamed
    ac_renamed = []
    for refname, counts in ac:
        ac_renamed.append((refname.replace('/', '_'), counts))
    ac = ac_renamed
    with gzip.open(args.out_dir+'/pair_counts.pkl.gz', 'w') as fh:
        pickle.dump((ac,acc), fh)
| 1.648438 | 2 |
atlabs/tests/test_module.py | fsi-sandbox/fsi-sdk-python | 1 | 84356 | import unittest
from unittest.mock import Mock, patch
from .common import header, body, responses, R
from atlabs.sms import Sms
from atlabs.token import Token
from atlabs.voice import Voice
from atlabs.airtime import Airtime
# Each test patches requests.post so the SDK never hits the network: the
# mocked response R(data) echoes a canned payload from `responses`, and the
# assertion checks the client method returns that payload unchanged.
@patch('requests.post')
def test_create_checkout_token(mock_post):
    data = responses['CreateCheckoutToken']
    mock_post.return_value = R(data)
    assert Token(header).CreateCheckoutToken(
        body["CreateCheckoutToken"]) == data, "should return an object"
@patch('requests.post')
def test_create_premium_subscription(mock_post):
    data = responses['CreatePremiumSubscription']
    mock_post.return_value = R(data)
    assert Sms(header).CreatePremiumSubscription(
        body["CreatePremiumSubscription"]) == data, "should return an object"
@patch('requests.post')
def test_delete_premium_subscription(mock_post):
    data = responses['DeletePremiumSubscription']
    mock_post.return_value = R(data)
    assert Sms(header).DeletePremiumSubscription(
        body["DeletePremiumSubscription"]) == data, "should return an object"
@patch('requests.post')
def test_fetch_message(mock_post):
    data = responses['FetchMessage']
    mock_post.return_value = R(data)
    assert Sms(header).FetchMessage(
        body["FetchMessage"]) == data, "should return an object"
@patch('requests.post')
def test_fetch_premium_subscription(mock_post):
    data = responses['FetchPremiumSubscription']
    mock_post.return_value = R(data)
    assert Sms(header).FetchPremiumSubscription(
        body["FetchPremiumSubscription"]) == data, "should return an object"
@patch('requests.post')
def test_media_upload(mock_post):
    data = responses['MediaUpload']
    mock_post.return_value = R(data)
    assert Voice(header).MediaUpload(
        body["MediaUpload"]) == data, "should return an object"
@patch('requests.post')
def test_queue_status(mock_post):
    data = responses['QueueStatus']
    mock_post.return_value = R(data)
    assert Voice(header).QueueStatus(
        body["QueueStatus"]) == data, "should return an object"
@patch('requests.post')
def test_send_airtime(mock_post):
    data = responses['SendAirtime']
    mock_post.return_value = R(data)
    assert Airtime(header).SendAirtime(
        body["SendAirtime"]) == data, "should return an object"
@patch('requests.post')
def test_send_message(mock_post):
    data = responses['SendMessage']
    mock_post.return_value = R(data)
    assert Sms(header).SendMessage(
        body["SendMessage"]) == data, "should return an object"
@patch('requests.post')
def test_send_premium_message(mock_post):
    data = responses['SendPremiumMessage']
    mock_post.return_value = R(data)
    assert Sms(header).SendPremiumMessage(
        body["SendPremiumMessage"]) == data, "should return an object"
@patch('requests.post')
def test_voice_call(mock_post):
    data = responses['VoiceCall']
    mock_post.return_value = R(data)
    assert Voice(header).VoiceCall(
        body["VoiceCall"]) == data, "should return an object"
| 1.398438 | 1 |
src/timeCalculator/__init__.py | SandunWebDev/py-multi-toolbox | 0 | 84484 | <gh_stars>0
# flake8: noqa
from timeCalculator.calculator import add_time
| 0.222656 | 0 |
Practice-Python/Ex10-List-Overlap-Comprehensions.py | Neil-Iyer/Basic-Exercises-in-Python | 0 | 84612 | <filename>Practice-Python/Ex10-List-Overlap-Comprehensions.py
''' You can find this exercise at the following website: https://www.practicepython.org/
10. List Overlap Comprehensions:
This week’s exercise is going to be revisiting an old exercise (see Exercise 5), except require the solution in a different way.
Take two lists, say for example these two:
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
and write a program that returns a list that contains only the elements that are common between the lists (without duplicates). Make sure your program works on two lists of different sizes.
Extra:
Randomly generate two lists to test this '''
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]

# Elements of `a` that also appear in `b` (duplicates kept).
resultRepetitions = [element for element in a if element in b]

# Drop duplicates while preserving first-seen order.
noRepetitions = []
for candidate in resultRepetitions:
    if candidate not in noRepetitions:
        noRepetitions.append(candidate)

print("List 1: " + str(a))
print("List 2: " + str(b))
print("List of numbers in both lists with repetitions:" + str(resultRepetitions))
print("List of numbers in both lists without repetitions:" + str(noRepetitions))
# Lists could also be generated randomly with the random module
# (see Exercise 5 - List Overlap).
silk/Silk.py | sjdv1982/silk | 0 | 84740 | import inspect, sys, traceback
from types import MethodType
from copy import copy, deepcopy
import numpy as np
from .SilkBase import SilkBase, compile_function
from .validation import (
schema_validator,
Scalar, scalar_conv, _types, infer_type, is_numpy_structure_schema, ValidationError
)
allowed_types = tuple(_types.values())
from .policy import default_policy as silk_default_policy
_underscore_attribute_names = set(["__array_struct__", "__array_interface__", "__array__"])
# A set of magic names where it is expected that they raise NotImplementedError if
# not implemented, rather than returning NotImplemented
_underscore_attribute_names2 = set(["__deepcopy__"])
# A set of magic names where it is expected that they raise AttributeError if
# not implemented, rather than returning NotImplemented
def hasattr2(obj, attr):
    """Like hasattr(), but also treats a KeyError raised during attribute
    lookup as "attribute absent" (some wrapped objects raise KeyError)."""
    try:
        getattr(obj, attr)
    except (AttributeError, KeyError):
        return False
    return True
def init_object_schema(silk, schema):
    """Ensure *schema* describes a JSON object and return its "properties" dict.

    Creates "type"/"properties" entries in-place when missing; asserts the
    existing type is "object" otherwise. The *silk* argument is unused.
    """
    if "type" not in schema:
        schema["type"] = "object"
        schema["properties"] = {}
        return schema["properties"]
    assert schema["type"] == "object"
    return schema.setdefault("properties", {})
class RichValue:
    """Unwraps a (possibly wrapped) value, capturing schema/form/storage.

    NOTE(review): Wrapper and FormWrapper are referenced but not imported in
    the visible header; presumably imported elsewhere in the module - confirm.
    """
    value = None
    # NOTE(review): the class-level `form`/`storage` defaults below are
    # shadowed by the properties of the same name defined further down.
    form = None
    storage = None
    schema = None
    _has_form = False
    def __init__(self, value, need_form=False):
        # Peel wrappers layer by layer; Silk carries its schema along.
        if isinstance(value, Wrapper):
            value = value._unwrap()
        if isinstance(value, Silk):
            self.schema = value._schema
            value = value._data
            if isinstance(value, Wrapper):
                value = value._unwrap()
        if isinstance(value, FormWrapper):
            # FormWrapper already carries form/storage metadata.
            self._form = value._form
            self._storage = value._storage
            value = value._wrapped
            self._has_form = True
        elif need_form:
            # NOTE(review): get_form is not imported in the visible header.
            self._storage, self._form = get_form(value)
            self._has_form = True
        self.value = value
    @property
    def form(self):
        # Only valid when form info was captured or computed in __init__.
        assert self._has_form
        return self._form
    @property
    def storage(self):
        # Only valid when form info was captured or computed in __init__.
        assert self._has_form
        return self._storage
def _unwrap_all(value):
    """Return *value* with a Wrapper layer removed, or unchanged otherwise.

    BUGFIX: the original had no return statement, so it always returned
    None regardless of input.
    """
    if isinstance(value, Wrapper):
        value = value._unwrap()
    return value
class AlmostDict(dict):
    """dict subclass whose items() returns a materialized list.

    jsonschema may mutate a schema while validating it; iterating a live
    items() view would then raise RuntimeError, so a snapshot is returned
    instead.
    """
    def items(self):
        return [entry for entry in dict.items(self)]
class Silk(SilkBase):
__slots__ = [
"_data", "_schema", "_parent",
"_parent_attr",
"_self_mode", "_default_policy"
]
    def __init__(self, *,
        data=None, schema=None,
        parent=None, _parent_attr=None,
        default_policy=None,
        _self_mode=False,
    ):
        """Create a Silk wrapper around *data* validated by *schema*.

        :param data: wrapped value; must be one of the allowed JSON-like
            types or a Wrapper/FormWrapper.
        :param schema: JSON schema dict (defaults to a fresh empty dict).
        :param parent: enclosing Silk instance, if this wraps a sub-value.
        :param _parent_attr: attribute name under which this lives in parent.
        :param default_policy: fallback inference policy (see _get_policy).
        :param _self_mode: internal flag stored as-is.
        """
        assert parent is None or isinstance(parent, Silk)
        self._parent = parent
        self._parent_attr = _parent_attr
        assert isinstance(data, allowed_types) \
            or isinstance(data, (Wrapper, FormWrapper)), type(data)
        self._data = data
        if schema is None:
            schema = {}
        assert isinstance(schema, allowed_types) \
            or isinstance(schema, Wrapper)
        self._schema = schema
        self._default_policy = default_policy
        self._self_mode = _self_mode
    def __call__(self, *args, **kwargs):
        """Invoke the schema-defined constructor or call method.

        With no wrapped data, compiles and runs the schema's "__init__"
        method on a fresh Silk instance and returns that instance; with
        data present, compiles and runs the schema's "__call__" method.
        Raises AttributeError when the respective method is missing.
        """
        data = self._data
        schema = self._schema
        methods = schema.get("methods", {})
        methods = RichValue(methods).value
        if data is None:
            constructor_code = methods.get("__init__", None)
            if constructor_code is None:
                raise AttributeError("__init__")
            name = "Silk __init__"
            try:
                constructor = compile_function(constructor_code, name)
            except Exception as exc:
                traceback.print_exc()
                raise exc from None
            # Constructor operates on a fresh instance sharing this schema.
            instance = Silk(
                data=None,
                schema=self._schema,
                default_policy=self._default_policy
            )
            result = constructor(instance, *args, **kwargs)
            assert result is None # __init__ must return None
            return instance
        else:
            call_code = methods.get("__call__", None)
            if call_code is None:
                raise AttributeError("__call__")
            name = "Silk __call__"
            try:
                call = compile_function(call_code, name)
            except Exception as exc:
                traceback.print_exc()
                raise exc from None
            return call(self, *args, **kwargs)
    def _get_policy(self, schema, default_policy=None):
        """Resolve the inference policy for *schema*.

        Order of precedence: the schema's own "policy", then the explicit
        *default_policy* argument, then this instance's default, then the
        module-wide silk_default_policy. A partial schema policy is merged
        over a deep copy of the module default.
        """
        policy = schema.get("policy")
        policy = RichValue(policy).value
        if policy is None or not len(policy):
            #TODO: implement lookup hierarchy wrapper that also looks at parent
            if default_policy is None:
                if self._default_policy is not None:
                    default_policy = self._default_policy
                else:
                    default_policy = silk_default_policy
            policy = default_policy
        elif len(policy.keys()) < len(silk_default_policy.keys()):
            # Partial policy: fill missing keys from the module default.
            policy0 = policy
            policy = deepcopy(silk_default_policy)
            policy.update(policy0)
        return policy
#***************************************************
#* methods for inference
#***************************************************
    def _is_binary_array(self):
        """Return True if this schema describes a binary-storage ndarray:
        binary-family "storage", a "form" with "ndim", and type "array"."""
        schema = self._schema
        # NOTE(review): `storage` is assigned but never used here.
        storage = None
        if "storage" not in schema:
            return False
        if schema["storage"] not in ("binary", "mixed-binary", "pure-binary"):
            return False
        if "form" not in schema:
            return False
        if "ndim" not in schema["form"]:
            return False
        if "type" not in schema or schema["type"] != "array":
            return False
        return True
    def _is_binary_array_item(self):
        """Return True if any ancestor is a binary array and no schema on the
        way up explicitly declares plain storage."""
        if self._parent is None:
            return False
        if self._parent._is_binary_array():
            return True
        schema = self._schema
        storage = None
        if "storage" in schema:
            storage = schema["storage"]
        # Explicit plain storage breaks the binary-array chain.
        if storage in ("plain", "mixed-plain", "pure-plain"):
            return False
        return self._parent._is_binary_array_item()
    def _infer_new_property(self, schema, attr, value, value_schema=None):
        """Infer (or update) the sub-schema for object property *attr* from
        *value*, honoring the "infer_new_property" policy flag."""
        policy = self._get_policy(schema)
        if not policy["infer_new_property"]:
            return False
        self._infer_type(schema, policy, {})
        if "properties" not in schema:
            schema["properties"] = {}
        if attr not in schema["properties"]:
            # New property: start from a copy of the provided value schema.
            if value_schema is None:
                value_schema = {}
            subschema = deepcopy(value_schema)
            new_subschema = True
        else:
            subschema = schema["properties"][attr]
            new_subschema = False
        subpolicy = self._get_policy(subschema, policy)
        # Run inference on a throwaway Silk bound to the sub-schema.
        dummy = Silk(schema=subschema, parent=self)
        dummy._infer(subpolicy, RichValue(value))
        if new_subschema:
            schema["properties"][attr] = subschema
    def _infer_object(self, schema, policy, rich_value):
        """Infer property sub-schemas for every field of an object value
        (a dict or a numpy structured scalar/record)."""
        assert isinstance(rich_value, RichValue)
        value = rich_value.value
        value_schema = rich_value.schema
        if not policy["infer_object"]:
            return False
        self._infer_type(schema, policy, value)
        if "properties" not in schema:
            schema["properties"] = {}
        if isinstance(value, dict):
            items = value.items()
        else: #struct
            # Numpy structured value: iterate its dtype fields.
            items = []
            for field in value.dtype.fields:
                subvalue = value[field]
                items.append((field, subvalue))
        if value_schema is None:
            value_schema = {}
        value_schema_props = value_schema.get("properties", {})
        for attr, subvalue in items:
            if attr not in schema["properties"]:
                # Seed from the incoming value's own property schema, if any.
                sub_value_schema = value_schema_props.get(attr, {})
                schema["properties"][attr] = deepcopy(sub_value_schema)
            subschema = schema["properties"][attr]
            subpolicy = self._get_policy(subschema, policy)
            dummy = Silk(schema=subschema, parent=self)
            dummy._infer(subpolicy, RichValue(subvalue))
    def _infer_new_item(self, schema, index, value, value_item_schema=None):
        """Infer the item schema for an array element being set at *index*.

        Skipped for items of binary arrays and when the "infer_new_item"
        policy flag is off. With "infer_array" == "pluriform", per-index
        item schemas are kept in a list; a single dict item schema is left
        untouched.
        """
        if self._is_binary_array_item():
            return False
        policy = self._get_policy(schema)
        if not policy["infer_new_item"]:
            return False
        self._infer_type(schema, policy, {})
        if "items" not in schema:
            # First item: build an item schema from the value (or copy the
            # provided one).
            if value_item_schema is not None:
                item_schema = deepcopy(value_item_schema)
            else:
                item_schema = {}
            dummy = Silk(schema=item_schema, parent=self)
            dummy._infer(policy, RichValue(value))
            if policy["infer_array"] == "pluriform" and index == 0:
                item_schema = [item_schema]
            schema["items"] = item_schema
        else:
            item_schemas = schema["items"]
            new_item_schema = None
            if isinstance(item_schemas, list):
                # Pluriform: one schema per index.
                if value_item_schema is not None:
                    new_item_schema = deepcopy(value_item_schema)
                else:
                    if index < len(item_schemas):
                        curr_item_schema = item_schemas[index]
                    else:
                        new_item_schema = {}
                        curr_item_schema = new_item_schema
                    dummy = Silk(schema=curr_item_schema,parent=self)
                    dummy._infer(policy, RichValue(value))
                # NOTE(review): `insert` is assigned but never used.
                insert = True
                if new_item_schema is not None:
                    # Pad with empty schemas up to `index`, then insert.
                    for n in range(len(item_schemas), index):
                        item_schemas.append({})
                    item_schemas.insert(index, new_item_schema)
            else: #single schema, no inference
                pass
    def _infer_array(self, schema, policy, rich_value):
        """Infer array-related schema fields ("storage", "form", "items")
        from a list/tuple (plain storage) or numpy array (binary storage)."""
        assert isinstance(rich_value, RichValue)
        value = rich_value.value
        self._infer_type(schema, policy, value)
        value_schema = rich_value.schema
        if isinstance(value, (list, tuple)):
            storage = "plain"
        elif isinstance(value, np.ndarray):
            storage = "binary"
        else:
            raise TypeError(value)
        if policy["infer_storage"]:
            schema["storage"] = storage
        if storage == "binary":
            # Record the numpy "form" (ndim/strides/shape) when any related
            # inference policy is enabled.
            if any((
                policy["infer_array"],
                policy["infer_ndim"],
                policy["infer_shape"],
                policy["infer_strides"]
            )):
                if "form" not in schema:
                    schema["form"] = {}
                form_schema = schema["form"]
                if policy["infer_ndim"]:
                    form_schema["ndim"] = value.ndim
                if policy["infer_strides"]:
                    # "contiguous" and "strides" are mutually exclusive.
                    contiguous = is_contiguous(value)
                    if contiguous:
                        form_schema["contiguous"] = True
                        form_schema.pop("strides", None)
                    else:
                        form_schema.pop("contiguous", None)
                        form_schema["strides"] = value.strides
                if policy["infer_shape"]:
                    form_schema["shape"] = value.shape
        if not policy["infer_array"]:
            return
        if "items" not in schema:
            value_item_schema = None
            if value_schema is not None:
                value_item_schema = value_schema.get("items")
            if value_item_schema is not None:
                # The value came with an item schema of its own: copy it.
                schema["items"] = deepcopy(value_item_schema)
            else:
                bytesize = None
                first_item_type = None
                unsigned = None
                if storage == "binary":
                    #TODO: only if parent does not have ndim...
                    if policy["infer_type"] and value.ndim > 1:
                        first_item_type = infer_type(value.flat[0])
                        if first_item_type == "integer":
                            unsigned = is_unsigned(value.dtype)
                    if policy["infer_array"] and policy["infer_storage"]:
                        bytesize = value.itemsize
                if len(value):
                    # Infer an item schema from the first element, then decide
                    # whether the array is uniform or pluriform.
                    pluriform = False
                    item_schema = {}
                    dummy = Silk(schema=item_schema,parent=self)
                    dummy._infer(policy, RichValue(value[0]))
                    if policy["infer_array"] == "pluriform":
                        pluriform = True
                    elif storage == "binary" and is_numpy_structure_schema(schema):
                        #fastest, if we can skip validation altogether
                        #requires that the schema is a numpy structure schema.
                        pass
                    else:
                        # Not too slow (10**5 per sec).
                        # Much better than constructing and validating
                        # an explicit Silk object!
                        validator = schema_validator(item_schema)
                        for n in range(1, len(value)):
                            try:
                                validator.validate(value[n])
                            except Exception:
                                pluriform = True
                                break
                    if pluriform:
                        # One schema per item; re-infer each remaining item.
                        item_schemas = [item_schema]
                        for n in range(1, len(value)):
                            item_schemas.append({})
                            dummy = Silk(schema=item_schemas[n],parent=self)
                            dummy._infer(policy, RichValue(value[n]))
                        if bytesize is not None:
                            for item_schema in item_schemas:
                                if "form" not in item_schema:
                                    item_schema["form"] = {}
                                item_schema["form"]["bytesize"] = bytesize
                        if first_item_type is not None:
                            for item_schema in item_schemas:
                                if "form" not in item_schema:
                                    item_schema["form"] = {}
                                item_schema["form"]["type"] = first_item_type
                            # NOTE(review): this runs *after* the loop, so only
                            # the last item schema gets "unsigned" — confirm
                            # whether it belongs inside the loop.
                            if unsigned is not None:
                                item_schema["form"]["unsigned"] = unsigned
                        schema["items"] = item_schemas
                    else:
                        # Uniform array: a single shared item schema.
                        if bytesize is not None:
                            if "form" not in item_schema:
                                item_schema["form"] = {}
                            item_schema["form"]["bytesize"] = bytesize
                        if first_item_type is not None:
                            if "form" not in item_schema:
                                item_schema["form"] = {}
                            item_schema["form"]["type"] = first_item_type
                            if unsigned is not None:
                                item_schema["form"]["unsigned"] = unsigned
                        schema["items"] = item_schema
def _infer_type(self, schema, policy, value):
if policy["infer_type"]:
if "type" not in schema:
type_ = infer_type(value)
if type_ != "null":
schema["type"] = type_
if isinstance(value, np.number):
if "form" not in schema:
schema["form"] = {}
form = schema["form"]
if "bytesize" not in form:
form["bytesize"] = value.itemsize
    def _infer(self, policy, rich_value):
        """Top-level inference dispatch: infer the scalar type, then recurse
        into object/array inference based on the recorded schema type.

        Binary-array items get type-only inference; numpy-array leaves of a
        binary array return False (no inference possible).
        """
        assert isinstance(rich_value, RichValue)
        schema = self._schema
        value = rich_value.value
        if self._is_binary_array_item():
            if not isinstance(value, np.ndarray):
                # NOTE(review): returns _infer_type's result (None) here but
                # False below; callers appear to ignore the return value.
                return self._infer_type(schema, policy, value)
            else:
                return False
        self._infer_type(schema, policy, value)
        if "type" in schema:
            if schema["type"] == "object":
                self._infer_object(schema, policy, rich_value)
            elif schema["type"] == "array":
                self._infer_array(schema, policy, rich_value)
#***************************************************
#* methods for setting
#***************************************************
def _set_value_simple(self, value):
assert self._parent is None or self._parent_attr is not None
if self._parent is not None:
rich_value = RichValue(value)
value, value_schema = rich_value.value, rich_value.schema
self._parent._setitem(self._parent_attr, value, value_schema)
elif isinstance(self._data, Wrapper):
self._data.set(value)
else:
self._data = value
def _set_value_dict(self, value):
assert self._parent is None or self._parent_attr is not None
if self._parent is not None:
rich_value = RichValue(value)
value, value_schema = rich_value.value, rich_value.schema
self._parent._setitem(self._parent_attr, value, value_schema)
return self._data
data = self._data
"""
try:
raw_data = self._raw_data()
is_none = (raw_data is None)
except ValueError:
is_none = True
if is_none or not isinstance(raw_data, dict) or not isinstance(value, dict):
self._set_value_simple(value)
else:
data.clear()
data.update(value)
"""
self._set_value_simple(value)
return self._data
    def _set(self, value, lowlevel):
        """Assign *value* to this Silk node.

        When *lowlevel* is False, schema inference runs (type, array/object
        structure) in addition to the raw assignment. Raises TypeError for
        unsupported value types.
        """
        rich_value = RichValue(value)
        value = rich_value.value
        value_schema = rich_value.schema
        def _get_schema():
            # Merge the value-supplied schema into ours when ours is empty.
            schema = self._schema
            updated = False  # NOTE(review): never used
            if test_none(schema) and value_schema is not None:
                if schema is None:
                    schema = value_schema
                    # NOTE(review): zero-arg super() inside a nested function
                    # with no arguments raises RuntimeError if this branch is
                    # ever reached — verify this code path.
                    super().__setattr__(self, "_schema", schema)
                else:
                    schema.update(value_schema)
            return schema
        if not lowlevel:
            schema = _get_schema()
            policy = self._get_policy(schema)
            self._infer_type(schema, policy, value)
        try:
            raw_data = self._raw_data()
            is_none = (raw_data is None)
        except ValueError:
            is_none = True
        if isinstance(value, Scalar):
            self._set_value_simple(value)
            if not lowlevel:
                if value_schema is not None:
                    schema.update(deepcopy(value_schema))
        elif isinstance(value, _types["array"]):
            #invalidates all Silk objects constructed from items
            if is_none:
                self._set_value_simple(value)
                is_empty = True
            else:
                is_empty = (len(raw_data) == 0)
                data = self._data
                if isinstance(data, Wrapper):
                    data.set(value)
                else:
                    # In-place slice assignment preserves the container object.
                    data[:] = value
            if is_empty and not lowlevel:
                self._infer_array(schema, policy, rich_value)
        elif isinstance(value, (dict, np.generic)):
            #invalidates all Silk objects constructed from items
            if is_none:
                is_empty = True
            else:
                try:
                    is_empty = (len(raw_data) == 0)
                except:  # NOTE(review): bare except — consider TypeError
                    is_empty = True
            self._set_value_dict(value)
            # Re-resolve schema/policy: _set_value_dict may have replaced them.
            schema = _get_schema()
            policy = self._get_policy(schema)
            if is_empty and not lowlevel:
                self._infer_object(schema, policy, rich_value)
        else:
            raise TypeError(type(value))
    def set(self, value):
        """Public setter: assign *value* (with schema inference); returns self
        so calls can be chained."""
        self._set(value, lowlevel=False)
        return self
    def _setitem(self, attr, value, value_schema):
        """Store *value* under key/index *attr* and run item/property schema
        inference. int keys are treated as array indices, others as object
        properties (handled by _infer_new_property, defined elsewhere)."""
        data = self._data
        schema = self._schema
        policy = self._get_policy(schema)
        try:
            raw_data = self._raw_data()
        except ValueError:
            raw_data = None
        if raw_data is None:
            # No data yet: bootstrap an empty list (int key) or dict.
            base = [] if isinstance(attr, int) else {}
            self._set_value_simple(base)
            self._infer_type(schema, policy, base)
            data = self._data
        data[attr] = value
        if isinstance(attr, int):
            self._infer_new_item(schema, attr, value, value_schema)
        else:
            self._infer_new_property(schema, attr, value, value_schema)
    def __setattr__(self, attr, value):
        """Attribute assignment.

        Slots go straight to the object; properties and callables are
        registered into the schema; schema-declared properties invoke their
        compiled setter; anything else becomes a data item via _setitem.
        Raises AttributeError for reserved names and TypeError when assigning
        to a read-only property or a method.
        """
        #print("_s", attr, value)
        if attr in type(self).__slots__:
            return super().__setattr__(attr, value)
        if hasattr(type(self), attr) and not attr.startswith("__"):
            raise AttributeError(attr) #Silk method
        if attr in ("data", "schema", "unsilk"):
            raise AttributeError
        if isinstance(value, property):
            return self._set_property(attr, value)
        if not isinstance(value, Silk) and callable(value):
            return self._set_method(attr, value)
        rich_value = RichValue(value)
        value, value_schema = rich_value.value, rich_value.schema
        schema = self._schema
        methods = schema.get("methods", {})
        methods = RichValue(methods).value
        m = methods.get(attr, None)
        if m is not None:
            if m.get("property", False):
                setter = m.get("setter", None)
                if setter is not None:
                    # Compile the stored setter source and invoke it.
                    mm = {"code": setter, "language": m["language"]}
                    name = "Silk .%s setter" % attr
                    try:
                        fset = compile_function(mm, name)
                    except Exception as exc:
                        traceback.print_exc()
                        raise exc from None
                    fset(self, value)
                else:
                    raise TypeError(attr) #read-only property cannot be assigned to
            else:
                raise TypeError(attr) #method cannot be assigned to
        else:
            self._setitem(attr, value, value_schema)
def __setitem__(self, item, value):
rich_value = RichValue(value)
value, value_schema = rich_value.value, rich_value.schema
self._setitem(item, value, value_schema)
    def _set_property(self, attribute, prop):
        """Register a Python *property* under *attribute* in the schema.

        Getter (and optional setter) source code is extracted with inspect
        and compile-checked up front so broken code is rejected immediately.
        """
        assert (not attribute.startswith("_")) or attribute.startswith("__"), attribute
        assert isinstance(prop, property)
        m = {"property": True, "language": "python"}
        getter_code = inspect.getsource(prop.fget)
        m["getter"] = getter_code
        mm = {"code": getter_code, "language": "python"}
        name = "Silk .%s getter" % attribute
        try:
            compile_function(mm, name, mode="property-getter")
        except Exception as exc:
            traceback.print_exc()
            raise exc from None
        if prop.fset is not None:
            setter_code = inspect.getsource(prop.fset)
            m["setter"] = setter_code
            mm = {"code": setter_code, "language": "python"}
            name = "Silk .%s setter" % attribute
            try:
                compile_function(mm, name)
            except Exception as exc:
                traceback.print_exc()
                raise exc from None
        # TODO: deleter
        schema = self._schema
        methods = schema.get("methods", None)
        if methods is None:
            methods = {}
            schema["methods"] = methods
            methods = schema["methods"] # to get back-end working properly
        methods[attribute] = m
"""
def _schema_get(self, attribute):
child = self.schema.get(attribute, None)
if child is None:
props = self.schema.get("properties")
if props is None:
raise AttributeError(attribute)
child = props.get(attribute)
if child is None:
raise AttributeError(attribute)
return child
"""
def _set_method(self, attribute, func):
assert (not attribute.startswith("_")) or attribute.startswith("__"), attribute
assert callable(func)
code = inspect.getsource(func)
m = {"code": code, "language": "python"}
name = "Silk .%s" % attribute
try:
compile_function(m, name)
except Exception as exc:
traceback.print_exc()
raise exc from None
schema = self._schema
methods = schema.get("methods", None)
if methods is None:
methods = {}
schema["methods"] = methods
methods = schema["methods"] # to get back-end working properly
methods[attribute] = m
def add_validator(self, func, name=None, *, attr=None):
assert callable(func)
code = inspect.getsource(func)
schema = self.schema
validators = schema.get("validators", None)
if validators is None:
l = 1
else:
l = len(validators) + 1
v = {"code": code, "language": "python"}
func_name = "Silk validator %d" % l
if name is not None:
v["name"] = name
func_name = name
try:
compile_function(v, func_name)
except Exception as exc:
traceback.print_exc()
raise exc from None
if isinstance(attr, int):
items_schema = schema.get("items", None)
if items_schema is None:
#TODO: check for uniform/pluriform
items_schema = {}
schema["items"] = items_schema
schema = items_schema
elif isinstance(attr, str):
prop_schema = schema.get("properties", None)
if prop_schema is None:
prop_schema = init_object_schema(self, schema)
attr_schema = prop_schema.get(attr, None)
if attr_schema is None:
attr_schema = {}
prop_schema[attr] = attr_schema
schema = attr_schema
new_validators = []
if validators is not None:
for validator in validators:
if name is None or validator.get("name") != name:
new_validators.append(validator)
new_validators.append(v)
schema["validators"] = new_validators
#***************************************************
#* methods for getting
#***************************************************
def _raw_data(self):
data = self._data
return RichValue(data).value
    def _get_special(self, attr, *, skip_modify_methods=False):
        """Resolve non-data attributes: private slots, modify-methods,
        the "self" view, schema-declared properties/methods, and attributes
        of the wrapped data object. Raises AttributeError when unresolved.
        """
        if attr.startswith("_") and not attr.startswith("__"):
            # Private (single-underscore) attributes bypass all magic.
            return super().__getattribute__(attr)
        if not skip_modify_methods:
            data2 = RichValue(self._data).value
            is_modify_method, result = try_modify_methods(self, data2, attr)
            if is_modify_method:
                return result
        data, schema = self._data, self._schema
        if attr == "self":
            # A "self-mode" view that exposes only raw attribute access.
            return Silk(data = data,
                        schema = schema,
                        _self_mode=True,
                        parent = self._parent,
                        default_policy=self._default_policy,
                        _parent_attr=self._parent_attr
            )
        if self._self_mode:
            raise AttributeError
        methods = schema.get("methods", {})
        methods = RichValue(methods).value
        m = methods.get(attr, None)
        if m is not None:
            if m.get("property", False):
                getter = m.get("getter", None)
                if getter is not None:
                    # Compile the stored getter source and evaluate it.
                    mm = {"code": getter, "language": m["language"]}
                    name = "Silk .%s getter" % attr
                    try:
                        fget = compile_function(mm, name, "property-getter")
                        result = fget(self)
                    except Exception as exc:
                        traceback.print_exc()
                        raise exc from None
                    return result
            else:
                # Plain schema method: compile and bind to this instance.
                name = "Silk .%s" % attr
                try:
                    method = compile_function(m, name)
                except Exception as exc:
                    traceback.print_exc()
                    raise exc from None
                return MethodType(method, self)
        if attr != "set":
            # Fall back to attributes of the wrapped data object.
            if skip_modify_methods:
                if hasattr(type(data), attr):
                    return getattr(data, attr)
            data2 = RichValue(data).value
            if hasattr(type(data2), attr):
                return getattr(data2, attr)
        if attr.startswith("__"):
            if attr in _underscore_attribute_names:
                raise NotImplementedError
            elif attr in _underscore_attribute_names2:
                raise AttributeError(attr)
            else:
                return NotImplemented
        raise AttributeError(attr)
    def __getattribute__(self, attr):
        """Attribute access: slots first, then special resolution, then the
        data/schema/unsilk views, then data items, then __prototype__
        defaults from the schema."""
        if attr == "__class__" or attr in type(self).__slots__:
            return super().__getattribute__(attr)
        try:
            return super().__getattribute__("_get_special")(attr)
        except (TypeError, KeyError, AttributeError, IndexError) as exc:
            if attr.startswith("_"):
                raise AttributeError(attr) from None
            if hasattr(type(self), attr):
                return super().__getattribute__(attr)
            if attr in ("data", "schema", "unsilk"):
                # "unsilk" and "data" return the unwrapped plain value.
                if attr == "unsilk":
                    result = getattr(self, "_data")
                else:
                    result = getattr(self, "_" + attr)
                if attr in ("data", "unsilk"):
                    result = RichValue(result).value
                return result
            if self._self_mode:
                raise exc from None
            # Look up a prototype default for this attribute, if any.
            proto_ok = False
            try:
                from_proto = deepcopy(self._schema["__prototype__"][attr])
                proto_ok = True
            except KeyError:
                pass
            try:
                return super().__getattribute__("_getitem")(attr)
            except (TypeError, KeyError, AttributeError, IndexError):
                if proto_ok:
                    return Silk(
                        data=from_proto,
                        default_policy=self._default_policy
                    )
                raise AttributeError(attr) from None
            except Exception:
                if proto_ok:
                    return Silk(
                        data=from_proto,
                        default_policy=self._default_policy
                    )
                raise exc from None
def __iter__(self):
data = RichValue(self._data).value
if isinstance(data, (list, tuple, np.ndarray)):
data_iter = range(len(data)).__iter__()
return SilkIterator(self, data_iter)
else:
data_iter = data.__iter__()
return data_iter
    def _getitem(self, item):
        """Resolve data item *item* and wrap it, together with the matching
        child schema, in a new Silk instance (slices share this schema)."""
        data, schema = self._data, self._schema
        attr_in_data = False
        if isinstance(item, str):
            if hasattr2(data, item):
                attr_in_data = True
            if isinstance(data, MixedBase):
                # Reserved MixedBase attribute names must not shadow items.
                if item in ("value", "storage", "form", "set"):
                    raise AttributeError(item)
            if attr_in_data:
                try:
                    result = getattr(data, item)
                except AttributeError:
                    raise KeyError(item) from None
                data2 = data
                if isinstance(data, Wrapper):
                    data2 = data._unwrap()
                if isinstance(data2, FormWrapper) and item in ("form", "storage"):
                    return result
                d = result
                if not isinstance(d, allowed_types) and not isinstance(d, (Wrapper, FormWrapper)):
                    d = data[item]
            else:
                d = data[item]
        # NOTE(review): non-string items (int/slice) do not appear to assign
        # `d` before use below — looks like an `else: d = data[item]` for the
        # outer isinstance check is missing; verify against upstream.
        """
        if isinstance(d, Scalar):
            return scalar_conv(d)
        """
        if isinstance(item, slice):
            # TODO: slice "items" schema if it is a list
            return Silk(
                parent=self,
                data=d,
                schema=schema,
                default_policy=self._default_policy,
                _parent_attr=item,
            )
        if isinstance(item, int):
            schema_items = schema.get("items", None)
            child_schema = None
            if schema_items is None:
                schema_items = {}
                schema["items"] = schema_items
            elif isinstance(schema_items, list):
                child_schema = schema_items[item]
            # NOTE(review): a uniform (dict) "items" schema leaves
            # child_schema as None here — confirm intended.
        else:
            schema_props = schema.get("properties", None)
            if schema_props is None:
                if "items" in schema:
                    raise AttributeError
                schema_props = init_object_schema(self, schema)
            child_schema = schema_props.get(item, None)
            if child_schema is None:
                child_schema = {}
                schema_props[item] = child_schema
        result = Silk(
            parent=self,
            data=d,
            schema=child_schema,
            default_policy=self._default_policy,
            _parent_attr=item,
        )
        return result
def __getitem__(self, item):
if isinstance(item, str):
try:
return self._getitem(item)
except (TypeError, KeyError, AttributeError) as exc:
try:
return self._get_special(item)
except (TypeError, KeyError, AttributeError) as exc2:
raise exc2 from None
else:
raise exc from None
else:
return self._getitem(item)
def _validate(self):
need_form = True # TODO: detect "form" in schema, i.e. if validator_form will ever be triggered
rich_value = RichValue(self._data, need_form)
data = FormWrapper(
rich_value.value,
rich_value.form,
rich_value.storage
)
schema = RichValue(self._schema).value
schema = AlmostDict(schema)
schema_validator(schema).validate(data)
    def validate(self, full=True):
        """Validate this node.

        full=True  — delegate to the topmost parent (or _validate here);
        full=None  — run only the jsonschema validation of this node;
        full=False — run only this schema's custom validators, then recurse
                     upward with full=False.
        """
        assert full in (True, False, None), full
        #print("Silk.validate", self, self._parent, full)
        if full != True:
            if full is None:
                self._validate()
            else:
                schema = self._schema
                validators = schema.get("validators", [])
                validators = RichValue(validators).value
                if len(validators):
                    for v, validator_code in enumerate(validators):
                        name = "Silk validator %d" % (v+1)
                        try:
                            validator_func = compile_function(validator_code, name)
                        except Exception as exc:
                            traceback.print_exc()
                            raise exc from None
                        try:
                            validator_func(self)
                        except Exception as exc:
                            # Wrap any validator failure with a short traceback.
                            tb = traceback.format_exc(limit=3)
                            raise ValidationError("\n"+tb) from None
                if self._parent is not None:
                    self._parent.validate(full=False)
        elif self._parent is not None:
            self._parent.validate()
        else:
            self._validate()
class SilkIterator:
    """Iterator over a Silk sequence that yields Silk-wrapped items."""

    def __init__(self, silk, item_iterator):
        # *item_iterator* yields keys/indices; each is resolved via silk[...].
        self.silk = silk
        self.item_iterator = item_iterator

    def __next__(self):
        return self.silk[next(self.item_iterator)]
from .modify_methods import try_modify_methods
from .mixed import MixedBase, is_contiguous, is_unsigned
from .mixed.get_form import get_form
from .validation.formwrapper import FormWrapper
from . import Wrapper
from . import test_none | 1.484375 | 1 |
sendAndReceive example.py | capella-ben/microPython_MCP2515 | 0 | 84868 | from canio import Message, RemoteTransmissionRequest
from MCP2515 import MCP2515 as CAN
from time import sleep_ms
NODE_ID = 0x1234ABCD
#NODE_ID = 0xAA
can_bus = CAN(0, 5, baudrate = 25000, debug=True)
#print('Listening...')
i = 0
while True:
with can_bus.listen(timeout=1.0) as listener:
message = Message(id=NODE_ID, data=str(i).encode("utf-8"), extended=True)
send_success = can_bus.send(message)
print("Send success:", send_success)
sleep_ms(1000)
i = i + 1
"""
message = Message(id=0xAAAA, data=str(i).encode("utf-8"), extended=True)
send_success = can_bus.send(message)
print("Send success:", send_success)
sleep_ms(1000)
i = i + 1
"""
"""
message_count = listener.in_waiting()
if message_count == 0:
continue
print(message_count, "messages available")
for _i in range(message_count):
msg = listener.receive()
print("Message from ", hex(msg.id), "extended:", msg.extended)
if isinstance(msg, Message):
print("message data:", msg.data)
if isinstance(msg, RemoteTransmissionRequest):
print("RTR length:", msg.length)
print("")
"""
| 1.734375 | 2 |
paddlespatial/networks/sagnn.py | PaddlePaddle/PaddleSpatial | 38 | 84996 | <filename>paddlespatial/networks/sagnn.py
# -*-Encoding: utf-8 -*-
################################################################################
#
# Copyright (c) 2021 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
Description: Spatial Adaptive Graph Convolutional Layer in the paper "Competitive analysis for points of interest".
Authors: lishuangli(<EMAIL>)
Date: 2021/09/24
"""
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import pgl
from pgl.nn import functional as GF
from pgl.sampling.custom import subgraph
class SpatialLocalAGG(nn.Layer):
    """
    Desc:
        Local aggregation layer for SA-GNN.
    """
    def __init__(self, input_dim, hidden_dim, transform=False, activation=None):
        """
        Desc:
            __init__
        Args:
            input_dim: The dimension size of the input tensor
            hidden_dim: The dimension size of the output tensor
            transform: If transform is True, then the linear transformation is employed
            activation: The activation for the output
        """
        super(SpatialLocalAGG, self).__init__()
        self.transform = transform
        if self.transform:
            # Bias-free projection, as in standard GCN layers.
            self.linear = nn.Linear(input_dim, hidden_dim, bias_attr=False)
        self.activation = activation

    def forward(self, graph, feature, norm=True):
        """
        Desc:
            A step of forward the layer.
        Args:
            graph: pgl.Graph instance
            feature: The node feature matrix with shape (num_nodes, input_size)
            norm: If norm is not None, then the feature will be normalized by given norm.
                  If norm is None and self.norm is true, then we use lapacian degree norm.
        Returns:
            outputs: A tensor with shape (num_nodes, output_size)
        """
        # NOTE(review): the incoming *norm* argument is ignored — the Laplacian
        # degree norm is always recomputed here, so the docstring above
        # describes behavior that is not implemented. Confirm intended.
        norm = GF.degree_norm(graph)
        if self.transform:
            feature = self.linear(feature)
        # Symmetric normalization: scale before and after aggregation.
        feature = feature * norm
        output = graph.send_recv(feature, "sum")
        output = output * norm
        if self.activation is not None:
            output = self.activation(output)
        return output
class SpatialOrientedAGG(nn.Layer):
    """
    Desc:
        Global aggregation layer for SA-GNN.
    """
    def __init__(self, input_dim, hidden_dim, num_sectors, transform=False, activation=None):
        """
        Desc:
            __init__
        Args:
            input_dim: The dimension size of the input tensor
            hidden_dim: The dimension size of the output tensor
            num_sectors: The number of spatial sector
            transform: If transform is True, then the linear transformation is employed
            activation: The activation for the output
        """
        super(SpatialOrientedAGG, self).__init__()
        self.num_sectors = num_sectors
        # One sub-layer per sector plus one for coincident (distance-0) pairs,
        # hence the (num_sectors + 1) factor in the concatenated input width.
        linear_input_dim = hidden_dim * (num_sectors + 1) if transform else input_dim * (num_sectors + 1)
        self.linear = nn.Linear(linear_input_dim, hidden_dim, bias_attr=False)
        self.conv_layer = nn.LayerList()
        for _ in range(num_sectors + 1):
            conv = SpatialLocalAGG(input_dim, hidden_dim, transform, activation=lambda x: x)
            self.conv_layer.append(conv)

    def get_subgraphs(self, g):
        """
        Desc:
            Extract the subgraphs according to the spatial loction.
        Args:
            g: pgl.Graph instance
        Returns:
            outputs: A list of subgraphs (pgl.Graph instance)
        """
        g = g.numpy()
        subgraph_edge_list = [[] for _ in range(self.num_sectors + 1)]
        coords = g.node_feat['coord'] # size: [num_poi, 2]
        for src_node, dst_node in g.edges:
            src_coord, dst_coord = coords[src_node], coords[dst_node]
            rela_coord = dst_coord - src_coord
            if rela_coord[0] == 0 and rela_coord[1] == 0:
                # Coincident POIs share sector bucket 0.
                sec_ind = 0
            else:
                # Nudge x away from zero to avoid division by zero in arctan.
                rela_coord[0] += 1e-9
                angle = np.arctan(rela_coord[1]/rela_coord[0])
                # Fold the arctan result into [0, pi) and account for the
                # left half-plane (x < 0).
                angle = angle + np.pi * int(angle < 0)
                angle = angle + np.pi * int(rela_coord[0] < 0)
                sec_ind = int(angle / (np.pi / self.num_sectors))
                sec_ind = min(sec_ind, self.num_sectors)
            subgraph_edge_list[sec_ind] += [(src_node, dst_node)]
        subgraph_list = []
        for i in range(self.num_sectors + 1):
            sub_g = subgraph(g, g.nodes, edges=subgraph_edge_list[i])
            subgraph_list.append(sub_g.tensor())
        return subgraph_list

    def forward(self, graph, feature, norm=None):
        """
        Desc:
            A step of forward the layer.
        Args:
            graph: pgl.Graph instance
            feature: The node feature matrix with shape (num_nodes, input_size)
            norm: If norm is not None, then the feature will be normalized by given norm.
                  If norm is None and self.norm is true, then we use lapacian degree norm.
        Returns:
            outputs: A tensor with shape (num_nodes, output_size)
        """
        subgraphs = self.get_subgraphs(graph)
        h_list = []
        for i in range(self.num_sectors + 1):
            # Aggregate each directional subgraph with its own sub-layer.
            h = self.conv_layer[i](subgraphs[i], feature, norm)
            h_list.append(h)
        feat_h = paddle.concat(h_list, axis=-1)
        feat_h = paddle.cast(feat_h, 'float32')
        output = self.linear(feat_h)
        return output
class SpatialAttnProp(nn.Layer):
    """
    Desc:
        Location-aware attentive propagation layer for SA-GNN.
    """
    def __init__(self, input_dim, hidden_dim, num_heads, dropout, max_dist=10000, grid_len=100, activation=None):
        super(SpatialAttnProp, self).__init__()
        # NOTE(review): the string below sits *after* super().__init__(), so
        # it is a no-op statement, not the real docstring of __init__ —
        # consider moving it to the first line of the method.
        """
        Desc:
            __init__
        Args:
            input_dim: The dimension size of the input tensor
            hidden_dim: The dimension size of the output tensor
            num_heads: The number of attention head
            dropout: Dropout ratio
            max_dist: The maximum distance range around each POI
            grid_len: The length of segmented grid
        	activation: The activation for the output
        """
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.grid_len = grid_len
        self.max_dist = max_dist
        self.grid_num = int(max_dist / grid_len)
        self.poi_fc = nn.Linear(input_dim, num_heads * hidden_dim)
        self.loc_fc = nn.Linear(2 * hidden_dim, num_heads * hidden_dim)
        # One embedding row per signed grid offset in each axis.
        self.x_embedding = nn.Embedding(2 * self.grid_num, hidden_dim, sparse=True)
        self.y_embedding = nn.Embedding(2 * self.grid_num, hidden_dim, sparse=True)
        self.weight_src = self.create_parameter(shape=[num_heads, hidden_dim])
        self.weight_dst = self.create_parameter(shape=[num_heads, hidden_dim])
        self.weight_loc = self.create_parameter(shape=[num_heads, hidden_dim])
        self.feat_drop = nn.Dropout(p=dropout)
        self.attn_drop = nn.Dropout(p=dropout)
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
        self.activation = activation

    def attn_send_func(self, src_feat, dst_feat, edge_feat):
        """
        Desc:
            Sending function for message passing
        Args:
            src_feat: The feature of source POI node
            dst_feat: The feature of destination POI node
            edge_feat: The feature of edge between two POIs
        Returns:
            outputs: A dict of tensor
        """
        # GAT-style additive attention logits: source + destination + location.
        alpha = src_feat["attn"] + dst_feat["attn"] + edge_feat['attn']
        alpha = self.leaky_relu(alpha)
        return {"alpha": alpha, "h": src_feat["h"]}

    def attn_recv_func(self, msg):
        """
        Desc:
            Receiving function for message passing
        Args:
            msg: Message dict
        Returns:
            outputs: A tensor with shape (num_nodes, output_size)
        """
        # Softmax over each node's incoming edges, then weighted sum.
        alpha = msg.reduce_softmax(msg["alpha"])
        alpha = paddle.reshape(alpha, [-1, self.num_heads, 1])
        alpha = self.attn_drop(alpha)
        feature = msg["h"]
        feature = paddle.reshape(feature, [-1, self.num_heads, self.hidden_dim])
        feature = feature * alpha
        feature = paddle.reshape(feature, [-1, self.num_heads * self.hidden_dim])
        feature = msg.reduce(feature, pool_type="sum")
        return feature

    def calculate_loc_index(self, src_coord, dst_coord):
        """
        Desc:
            Calculte the grid index for loaction-aware attention
        Args:
            src_coord: Coordinate of source POI node
            dst_coord: Coordinate of target POI node
        Returns:
            outputs: Two tensors with shape (num_edges, 1)
        """
        x, y = paddle.split(dst_coord - src_coord, num_or_sections=2, axis=1)
        # Map each signed offset to an index in [0, 2 * grid_num):
        # magnitude picks the cell, sign selects the half of the range.
        x_inds = paddle.cast(paddle.abs(x)/self.grid_len, 'int64')
        y_inds = paddle.cast(paddle.abs(y)/self.grid_len, 'int64')
        x_inds = x_inds + self.grid_num * paddle.cast(x >= 0, 'int64')
        y_inds = y_inds + self.grid_num * paddle.cast(y >= 0, 'int64')
        x_inds = paddle.clip(x_inds, 0, 2 * self.grid_num - 1)
        y_inds = paddle.clip(y_inds, 0, 2 * self.grid_num - 1)
        return x_inds, y_inds

    def forward(self, graph, feature):
        """
        Desc:
            A step of forward the layer.
        Args:
            graph: pgl.Graph instance
            feature: The node feature matrix with shape (num_nodes, input_size)
        Returns:
            outputs: A tensor with shape (num_nodes, output_size)
        """
        feature = self.feat_drop(feature)
        poi_feat = self.poi_fc(feature)
        poi_feat = paddle.reshape(poi_feat, [-1, self.num_heads, self.hidden_dim])
        # calculate location feature
        src_inds, dst_inds = paddle.split(graph.edges, num_or_sections=2, axis=1)
        src_coord = paddle.gather(graph.node_feat['coord'], paddle.squeeze(src_inds))
        dst_coord = paddle.gather(graph.node_feat['coord'], paddle.squeeze(dst_inds))
        x_inds, y_inds = self.calculate_loc_index(src_coord, dst_coord)
        x_emb = self.x_embedding(x_inds)
        y_emb = self.y_embedding(y_inds)
        loc_feat = self.loc_fc(paddle.concat([x_emb, y_emb], axis=-1))
        loc_feat = paddle.reshape(loc_feat, [-1, self.num_heads, self.hidden_dim])
        # Per-head attention logits for source, destination and location.
        attn_src = paddle.sum(poi_feat * self.weight_src, axis=-1)
        attn_dst = paddle.sum(poi_feat * self.weight_dst, axis=-1)
        attn_loc = paddle.sum(loc_feat * self.weight_loc, axis=-1)
        msg = graph.send(self.attn_send_func,
                         src_feat={"attn": attn_src, "h": poi_feat},
                         dst_feat={"attn": attn_dst},
                         edge_feat={'attn': attn_loc})
        rst = graph.recv(reduce_func=self.attn_recv_func, msg=msg)
        if self.activation:
            rst = self.activation(rst)
        return rst
| 1.992188 | 2 |
src/time_travel/patchers/base_patcher.py | meircif/time-travel | 38 | 85124 | """Base class for patching time and I/O modules."""
import sys
import inspect
class BasePatcher(object):
    """Base class for patching time and I/O modules."""

    # These modules will not be patched by default, unless explicitly specified
    # in `modules_to_patch`.
    # This is done to prevent time-travel from interfering with the timing of
    # the actual test environment.
    UNPATCHED_MODULES = ['pytest', '_pytest', 'unittest', 'mock', 'threading']

    def __init__(self,
                 clock,
                 event_pool,
                 modules_to_patch=None,
                 patcher_module=None):
        """Create the patch.

        @param clock: shared simulated clock object.
        @param event_pool: pool of simulated I/O events.
        @param modules_to_patch: module name or list/tuple of module names to
            patch; when empty, all loaded modules are scanned (minus the
            exclusions).
        @param patcher_module: the patcher's own module (excluded from
            patching); falsy values are normalized to None.
        """
        self.clock = clock
        self.event_pool = event_pool
        if modules_to_patch is None:
            self.modules_to_patch = []
        elif isinstance(modules_to_patch, (list, tuple)):
            self.modules_to_patch = modules_to_patch
        else:
            self.modules_to_patch = [modules_to_patch]
        self.patcher_module = patcher_module if patcher_module else None
        # Set of (module, attribute, original_value) tuples restored by stop().
        self._undo_set = set()

    @classmethod
    def get_events_namespace(cls):
        """Return the namespace of the patcher's events."""
        return None

    @classmethod
    def get_events_types(cls):
        """Return Enum of the patcher's events types."""
        return None

    def get_patched_module(self):
        """Return the actual module object to be patched."""
        raise NotImplementedError()

    def get_patch_actions(self):
        """Return list of the patches to do.

        The list structure is tuples containing:
        (real_object_name,
         the_real_object,
         fake_object)
        """
        raise NotImplementedError()

    def start(self):
        """Start the patcher.

        The logic to the patchers start is based on the work done by:
        spulec/freezegun
        under
        https://github.com/spulec/freezegun
        Copyright (C) 2017 spulec/freezegun

        Licensed under the Apache License, Version 2.0 (the "License"); you may
        not use this file except in compliance with the License. You may obtain
        a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

        Unless required by applicable law or agreed to in writing,
        software distributed under the License is distributed on an
        "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
        KIND, either express or implied. See the License for the
        specific language governing permissions and limitations
        under the License.

        Modifications:
        Modifications to the file was to leave the inner change of the loaded
        modules and removing any other related logic to a specific module.
        """
        patch_actions = self.get_patch_actions()
        real_id_to_fake = {id(real): fake for _, real, fake in patch_actions}
        patched_module = self.get_patched_module()

        # Change modules for later imports.
        for obj_name, real_obj, fake_obj in patch_actions:
            self._save_for_undo(patched_module, obj_name, real_obj)
            setattr(patched_module, obj_name, fake_obj)

        if self.modules_to_patch:
            # Only a given list of modules is required to be patched.
            modules = [sys.modules[name] for name in self.modules_to_patch]
        else:
            # Build the set of module *names* that must never be scanned.
            # BUGFIX: `patched_module` is a module object, so it has to be
            # compared by its __name__ — comparing the object itself against
            # `module.__name__` (a string) never matched, and the patched
            # module was not excluded from the scan.
            excluded_names = set(self.UNPATCHED_MODULES)
            excluded_names.add(__name__)
            for excluded_module in (patched_module, self.patcher_module):
                if excluded_module is not None:
                    excluded_names.add(getattr(excluded_module, '__name__',
                                               excluded_module))
            # Patch on all loaded modules except the excluded ones.
            modules = [
                module for mod_name, module in sys.modules.items() if
                (inspect.ismodule(module)
                 and hasattr(module, '__name__')
                 and module.__name__ not in excluded_names)
            ]

        # Search in all modules for the objects to patch.
        for module in modules:
            for attr in dir(module):
                try:
                    # Get any attribute loaded on the module.
                    attribute_value = getattr(module, attr)
                except (ValueError, AttributeError, ImportError):
                    # Some libraries raise on attribute access,
                    # e.g. attr=dbm_gnu, module=pkg_resources._vendor.six.moves
                    continue

                # Only touch attributes that are one of the real objects.
                if id(attribute_value) not in real_id_to_fake:
                    continue

                # Swap in the fake object corresponding to the real one.
                fake_obj = real_id_to_fake[id(attribute_value)]
                setattr(module, attr, fake_obj)

                # Remember the original so stop() can restore it.
                self._save_for_undo(module, attr, attribute_value)

    def stop(self):
        """Stop the patching: restore every saved attribute."""
        for module, attribute, original_value in self._undo_set:
            setattr(module, attribute, original_value)
        self._undo_set.clear()

    def _save_for_undo(self, module, attribute, original_value):
        """Record (module, attribute, original_value) for restoration."""
        self._undo_set.add((module, attribute, original_value))
django_gears/utils.py | wiserthanever/django-gears | 0 | 85252 | import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from gears.asset_handler import BaseAssetHandler
from gears.finders import BaseFinder
_cache = {}
def _get_module(path):
    """Import and return the module at dotted ``path``.

    Raises ImproperlyConfigured (instead of ImportError) when the import fails.
    """
    try:
        module = import_module(path)
    except ImportError as e:
        raise ImproperlyConfigured('Error importing module %s: "%s".' % (path, e))
    return module
def _get_module_attr(module_path, name):
    """Return attribute ``name`` from the module at ``module_path``.

    Raises ImproperlyConfigured when the module has no such attribute.
    """
    module = _get_module(module_path)
    try:
        return getattr(module, name)
    except AttributeError:
        raise ImproperlyConfigured('Module "%s" does not define a "%s" obj.' % (module_path, name))
def _get_object(path):
    """Resolve a dotted ``path`` ("pkg.mod.attr") to an object, memoized in _cache."""
    if path not in _cache:
        module_path, attr_name = path.rsplit('.', 1)
        _cache[path] = _get_module_attr(module_path, attr_name)
    return _cache[path]
def get_cache(path, options=None):
    """Instantiate the cache class at dotted ``path`` with ``options`` as kwargs."""
    cache_cls = _get_object(path)
    kwargs = options or {}
    return cache_cls(**kwargs)
def get_finder(path, options=None):
    """Instantiate the finder class at ``path``; it must subclass BaseFinder."""
    finder_cls = _get_object(path)
    if not issubclass(finder_cls, BaseFinder):
        raise ImproperlyConfigured('"%s" is not a subclass of BaseFinder.' % path)
    kwargs = options or {}
    return finder_cls(**kwargs)
def get_asset_handler(path, options=None):
    """Resolve ``path`` to an asset handler.

    If the object is a BaseAssetHandler subclass it is instantiated via
    ``as_handler`` with ``options`` as keyword arguments.  Otherwise any
    callable is returned as-is (``options`` are ignored, with a warning).

    Raises ImproperlyConfigured for anything else.
    """
    obj = _get_object(path)
    try:
        # issubclass raises TypeError when obj is not a class at all; in
        # that case fall through to the plain-callable branch below.
        if issubclass(obj, BaseAssetHandler):
            return obj.as_handler(**(options or {}))
    except TypeError:
        pass
    if callable(obj):
        if options is not None:
            # Fixed typo in the message: "BaseAssethandler" -> "BaseAssetHandler".
            warnings.warn('%r is provided as %r handler options, but not used '
                          'because this handler is not a BaseAssetHandler subclass.'
                          % (options, path))
        return obj
    raise ImproperlyConfigured('"%s" must be a BaseAssetHandler subclass or callable object' % path)
| 1.429688 | 1 |
src/data/ReadNestedList.py | thonic92/chal_TM | 4 | 85380 | <filename>src/data/ReadNestedList.py
import pandas as pd
import ast
class ReadNestedList:
    """Flatten stringified nested lists and summarize element frequencies."""

    def __init__(self, tweets, target, name):
        self.tweets = tweets
        self.target = target
        self.name = name
        self.all_elements = list()
        self.df = None
        self.grpDF = None

    def read(self):
        """Parse each string in ``target`` as a Python literal list and collect items."""
        for raw in self.target:
            parsed = ast.literal_eval(raw)
            if parsed:
                self.all_elements.extend(parsed)
        return self

    def DF(self):
        """Build a DataFrame of the collected elements plus a lower-cased copy."""
        lower_col = '{}_lower_case'.format(self.name)
        self.df = pd.DataFrame(self.all_elements, columns=[self.name])
        self.df[lower_col] = self.df[self.name].str.lower()
        return self

    def computeGrpDF(self):
        """Group by the lower-cased column and count occurrences, most frequent first."""
        lower_col = '{}_lower_case'.format(self.name)
        counts = self.df.groupby(lower_col)[lower_col].count()
        self.grpDF = counts.reset_index(name='count').sort_values(['count'], ascending=False)
        return self
| 2 | 2 |
sanitizer/models.py | stewardshiptools/stewardshiptools | 0 | 85508 | from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
class SensitivePhraseAbstract(models.Model):
    """Abstract base with the fields shared by all sensitive-phrase models."""
    # Text to look for when sanitizing content.
    phrase = models.CharField(max_length=200)
    # Optional replacement text; when unset the phrase is presumably just
    # removed/redacted -- confirm against the sanitizer logic that uses it.
    replace_phrase = models.CharField(max_length=200, blank=True, null=True)
    # Whether matching should require a word boundary before the phrase.
    check_for_word_boundary_start = models.BooleanField(default=True)
    # Whether matching should require a word boundary after the phrase.
    check_for_word_boundary_end = models.BooleanField(default=True)
    def __str__(self):
        """Show the phrase itself in admin and listings."""
        return self.phrase
    class Meta:
        # Abstract: no database table is created for this model.
        abstract = True
class SensitivePhrase(SensitivePhraseAbstract):
    """Concrete, globally applied sensitive phrase.

    Inherits all fields from SensitivePhraseAbstract; removed a redundant
    ``pass`` statement that preceded Meta.
    """
    class Meta:
        # Newest records first, then by phrase text.
        ordering = ('-id', 'phrase')
class RelatedSensitivePhrase(SensitivePhraseAbstract):
    """Sensitive phrase attached to one specific object via a generic foreign key."""
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0;
    # confirm the project's Django version before upgrading.
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    # Generic relation resolving (content_type, object_id) to the target instance.
    obj = GenericForeignKey('content_type', 'object_id')
    class Meta:
        # Newest records first, then by phrase text.
        ordering = ('-id', 'phrase')
| 1.429688 | 1 |
RequestInfo.py | AndreyRub/LoveLetter | 0 | 85636 | class RequestInfo:
def __init__(self,
human_string, # human-readable string (will be printed for Human object)
action_requested, # action requested ('card' + index of card / 'opponent' / 'guess')
current_hand = [], # player's current hand
discard_pile = [], # discard pile
move_history = [], # list of player moves
players_active_status = [], # players' active status (active / lost)
players_protection_status = [], # players' protection status (protected / not protected)
invalid_moves = [], # invalid moves (optional) - an assist from Game
valid_moves = [], # valid moves (optional) - an assist from Game
players_active = [], # list of currently active players
players_protected = []): # Protection status of all players (including inactive ones)
self.human_string = human_string
self.action_requested = action_requested
self.current_hand = current_hand
self.discard_pile = discard_pile
self.move_history = move_history
self.players_active_status = players_active_status
self.players_protection_status = players_protection_status
self.invalid_moves = invalid_moves
self.valid_moves = valid_moves
self.players_active = players_active
self.players_protected = players_protected
def get_request_info(self):
return ({'human_string' : self.human_string,
'action_requested' : self.action_requested,
'current_hand' : self.current_hand,
'discard_pile' : self.discard_pile,
'move_history' : self.move_history,
'players_active_status' : self.players_active_status,
'players_protection_status' : self.players_protection_status,
'invalid_moves' : self.invalid_moves,
'valid_moves' : self.valid_moves,
'players_active' : self.players_active,
'players_protected' : self.players_protected})
| 1.875 | 2 |
tests/test_bap_comment.py | gitoleg/bap-ida-python | 81 | 85764 | from bap.utils.bap_comment import parse, dumps, is_valid
def test_parse():
    """parse() handles plain text, bare keys, values, and quoted values."""
    assert parse('hello') is None
    assert parse('BAP: hello') == {'hello': []}
    assert parse('BAP: hello,world') == {'hello': [], 'world': []}
    assert parse('BAP: hello=cruel,world') == {'hello': ['cruel', 'world']}
    assert parse('BAP: hello="hello, world"') == {'hello': ['hello, world']}
    expected = {
        'hello': ['cruel', 'world'],
        'goodbye': ['real', 'life'],
    }
    assert parse('BAP: hello=cruel,world goodbye=real,life') == expected
    assert parse('BAP: hello="f\'"') == {'hello': ["f'"]}
def test_dumps():
    """dumps() emits the BAP marker and serializes values with quoting."""
    assert 'BAP:' in dumps({'hello': []})
    multi = {'hello': ['cruel', 'world'], 'nice': [], 'thing': []}
    assert dumps(multi) == 'BAP: nice,thing hello=cruel,world'
    assert dumps({'hello': ["world'"]}) == 'BAP: hello="world\'"'
def test_is_valid():
    """Only comments starting with the BAP marker are considered valid."""
    for good in ('BAP: hello', 'BAP: hello,world'):
        assert is_valid(good)
    assert not is_valid('some comment')
def test_roundup():
    """A double parse/dumps round-trip preserves the comment dictionary."""
    original = {
        'x': [], 'y': [], 'z': [],
        'a': ['1', '2', '3'],
        'b': ['thing\''],
        'c': ['many things'],
        'd': ['strange \\ things'],
    }
    round_tripped = parse(dumps(parse(dumps(original))))
    assert round_tripped == original
def test_quotation():
    """Escaped double quotes survive parsing and a parse/dumps round-trip."""
    data = 'BAP: chars="{\\\"a\\\", \\\"b\\\", \\\"c\\\"}"'
    parsed = parse(data)
    assert parsed == {'chars': ['{"a", "b", "c"}']}
    assert parsed == parse(dumps(parsed))
def test_single_quote():
    """A single quote inside a quoted value parses without breaking."""
    data = 'BAP: key="{can\\\'t do}"'
    expected = {'key': ["{can\\'t do}"]}
    assert parse(data) == expected
| 2.0625 | 2 |
django/django-pyodbc-azure/DjangoWebProject1/__init__.py | garotogordo/azure-sql-database-samples | 674 | 85892 | """
Package for DjangoWebProject1.
"""
| 0.0271 | 0 |
port_scanner.py | wesleywh/portScanner | 0 | 86020 | <reponame>wesleywh/portScanner
#!/usr/bin/env python
import socket #for socket scanning
from threading import * #for threading
import subprocess #to run the clean command and system exit
import sys
from datetime import datetime
#from queue import Queue #for threading (using queues)
import argparse #used for allowing command line switches
def main():
    """Parse CLI arguments, scan each host on the requested ports, and
    report the elapsed time.

    Fixes over the previous version:
    - "A-B" port ranges now honour the lower bound and include B
      (previously e.g. "100-200" scanned ports 1..199).
    - Ports are always converted to int, so socket.connect_ex() receives
      a numeric port even when ports are listed individually.
    """
    # Available command line options.
    parser = argparse.ArgumentParser(description='Allow command line arguments.')
    parser.add_argument('-H', metavar='H', nargs="+", help="Target host to scan")
    parser.add_argument('-p', metavar="p", nargs="+", help="List of Ports to Scan")
    args = parser.parse_args()

    # Check what time the scan started.
    t1 = datetime.now()

    # Build the list of ports to scan, always as integers.
    if "-" in args.p[0]:
        # A single "low-high" range, inclusive on both ends.
        low, high = args.p[0].split('-', 1)
        scanports = list(range(int(low), int(high) + 1))
    else:
        # One or more individual ports.
        scanports = [int(p) for p in args.p]

    for host in args.H:
        # Print a banner with info on which host we are about to scan.
        print("-" * 60)
        print("Please wait, scanning remote host " + host)
        print("Scanning Ports " + " ".join(args.p))
        print("-" * 60)
        # Threaded port scanning.
        scan(host, scanports)

    # Check the time again and report how long the scan took.
    total = datetime.now() - t1
    print("Scanning Completed in: " + str(total))
def scan(host, ports):
    """Scan every port in ``ports`` on ``host``, one worker thread per port,
    and wait for all threads to finish before returning.
    """
    threads = []
    for port in ports:
        t = Thread(target=worker, args=(host, port))
        threads.append(t)
        t.start()
    # Wait for every probe to complete (previously the function returned
    # immediately, so the caller's elapsed-time report could be printed
    # while scans were still in flight).
    for t in threads:
        t.join()
def worker(remoteServerIP, port):
    """Probe a single TCP port on ``remoteServerIP`` and print it if open.

    The socket is now always closed via try/finally (previously it leaked
    whenever the port was closed or filtered).
    """
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            result = sock.connect_ex((remoteServerIP, port))
            if result == 0:
                print("Port {}: \t Open".format(port))
        finally:
            sock.close()
    except KeyboardInterrupt:
        print("Canceled the scan.")
        sys.exit()
    except socket.gaierror:
        print("Hostname could not be resolved. Exiting.")
        sys.exit()
    except socket.error:
        print("Could not connect to server")
        sys.exit()
    # NOTE(review): sys.exit() inside a worker thread only ends this thread,
    # not the whole program -- confirm that is the intended behavior.
# Script entry point: clear the terminal for a clean banner, then run the scanner.
if __name__ == "__main__":
    subprocess.call('clear', shell=True) #clear the screen
    main() #execute main code
| 2.125 | 2 |
explorer/tests/test_utils.py | velveteer/django-sql-explorer | 1 | 86148 | from django.test import TestCase
from explorer.actions import generate_report_action
from explorer.tests.factories import SimpleQueryFactory
from explorer import app_settings
from explorer.utils import passes_blacklist, schema_info, param, swap_params, extract_params, shared_dict_update, EXPLORER_PARAM_TOKEN, execute_query
class TestSqlBlacklist(TestCase):
    """Exercise the SQL blacklist with default and overridden settings."""

    def setUp(self):
        # Preserve the configured blacklist so each test can mutate it freely.
        self.orig = app_settings.EXPLORER_SQL_BLACKLIST

    def tearDown(self):
        # Restore the blacklist saved in setUp.
        app_settings.EXPLORER_SQL_BLACKLIST = self.orig

    def test_overriding_blacklist(self):
        app_settings.EXPLORER_SQL_BLACKLIST = []
        query = SimpleQueryFactory(sql="SELECT 1+1 AS \"DELETE\";")
        report_action = generate_report_action()
        response = report_action(None, None, [query, ])
        self.assertEqual(response.content, 'DELETE\r\n2\r\n')

    def test_default_blacklist_prevents_deletes(self):
        query = SimpleQueryFactory(sql="SELECT 1+1 AS \"DELETE\";")
        report_action = generate_report_action()
        response = report_action(None, None, [query, ])
        self.assertEqual(response.content, '0')

    def test_queries_modifying_functions_are_ok(self):
        self.assertTrue(passes_blacklist("SELECT 1+1 AS TWO; drop view foo;"))

    def test_queries_deleting_stuff_are_not_ok(self):
        sql = "'distraction'; delete from table; SELECT 1+1 AS TWO; drop view foo;"
        self.assertFalse(passes_blacklist(sql))

    def test_queries_dropping_views_is_ok_and_not_case_sensitive(self):
        self.assertTrue(passes_blacklist("SELECT 1+1 AS TWO; drop ViEw foo;"))
class TestSchemaInfo(TestCase):
    """Tests for schema_info() table listing and app exclusion."""

    def test_schema_info_returns_valid_data(self):
        """The explorer app's own table appears in the schema listing."""
        res = schema_info()
        tables = [a[1] for a in res]
        self.assertIn('explorer_query', tables)

    def test_app_exclusion_list(self):
        """Tables of excluded apps are filtered out of the schema listing."""
        # Save and restore the real setting instead of clobbering it with
        # ('',) as before, so later tests see the original configuration.
        orig = app_settings.EXPLORER_SCHEMA_EXCLUDE_APPS
        app_settings.EXPLORER_SCHEMA_EXCLUDE_APPS = ('explorer',)
        try:
            res = schema_info()
        finally:
            app_settings.EXPLORER_SCHEMA_EXCLUDE_APPS = orig
        tables = [a[1] for a in res]
        self.assertNotIn('explorer_query', tables)
class TestParams(TestCase):
def test_swappable_params_are_built_correctly(self):
expected = EXPLORER_PARAM_TOKEN + 'foo' + EXPLORER_PARAM_TOKEN
self.assertEqual(expected, param('foo'))
def test_params_get_swapped(self):
sql = 'please swap $$this$$ and $$that$$'
expected = 'please swap here and there'
params = {'this': 'here', 'that': 'there'}
got = swap_params(sql, params)
self.assertEqual(got, expected)
def test_empty_params_does_nothing(self):
sql = 'please swap $$this$$ and $$that$$'
params = None
got = swap_params(sql, params)
self.assertEqual(got, sql)
def test_non_string_param_gets_swapper(self):
sql = 'please swap $$this$$'
expected = 'please swap 1'
params = {'this': 1}
got = swap_params(sql, params)
self.assertEqual(got, expected)
def test_extracting_params(self):
sql = 'please swap $$this$$'
expected = {'this': ''}
self.assertEqual(extract_params(sql), expected)
def test_shared_dict_update(self):
source = {'foo': 1, 'bar': 2}
target = {'bar': None} # ha ha!
self.assertEqual({'bar': 2}, shared_dict_update(target, source)) | 1.570313 | 2 |
workflow_session/ingest.py | datajoint/workflow-session | 0 | 86276 | import csv
from workflow_session.pipeline import lab, subject, session
def ingest_general(csvs, tables, skip_duplicates=True, verbose=True):
"""
Inserts data from a series of csvs into their corresponding table:
e.g., ingest_general(['./lab_data.csv', './proj_data.csv'],
[lab.Lab(),lab.Project()]
ingest_general(csvs, tables, skip_duplicates=True)
:param csvs: list of relative paths to CSV files. CSV are delimited by commas.
:param tables: list of datajoint tables with ()
:param verbose: print number inserted (i.e., table length change)
"""
for csv_filepath, table in zip(csvs, tables):
with open(csv_filepath, newline="") as f:
data = list(csv.DictReader(f, delimiter=","))
if verbose:
prev_len = len(table)
table.insert(
data,
skip_duplicates=skip_duplicates,
# Ignore extra fields because some CSVs feed multiple tables
ignore_extra_fields=True,
)
if verbose:
insert_len = len(table) - prev_len # report length change
print(
f"\n---- Inserting {insert_len} entry(s) "
+ f"into {table.table_name} ----"
)
def ingest_lab(
lab_csv_path="./user_data/lab/labs.csv",
project_csv_path="./user_data/lab/projects.csv",
publication_csv_path="./user_data/lab/publications.csv",
keyword_csv_path="./user_data/lab/keywords.csv",
protocol_csv_path="./user_data/lab/protocols.csv",
users_csv_path="./user_data/lab/users.csv",
project_user_csv_path="./user_data/lab/project_users.csv",
skip_duplicates=True,
verbose=True,
):
"""
Inserts data from a CSVs into their corresponding lab schema tables.
By default, uses data from workflow_session/user_data/lab/
:param lab_csv_path: relative path of lab csv
:param project_csv_path: relative path of project csv
:param publication_csv_path: relative path of publication csv
:param keyword_csv_path: relative path of keyword csv
:param protocol_csv_path: relative path of protocol csv
:param users_csv_path: relative path of users csv
:param project_user_csv_path: relative path of project users csv
:param skip_duplicates=True: datajoint insert function param
:param verbose: print number inserted (i.e., table length change)
"""
# List with repeats for when mult dj.tables fed by same CSV
csvs = [
lab_csv_path,
lab_csv_path,
project_csv_path,
project_csv_path,
publication_csv_path,
keyword_csv_path,
protocol_csv_path,
protocol_csv_path,
users_csv_path,
users_csv_path,
users_csv_path,
project_user_csv_path,
]
tables = [
lab.Lab(),
lab.Location(),
lab.Project(),
lab.ProjectSourceCode(),
lab.ProjectPublication(),
lab.ProjectKeywords(),
lab.ProtocolType(),
lab.Protocol(),
lab.UserRole(),
lab.User(),
lab.LabMembership(),
lab.ProjectUser(),
]
ingest_general(csvs, tables, skip_duplicates=skip_duplicates, verbose=verbose)
def ingest_subjects(
subject_csv_path="./user_data/subject/subjects.csv",
subject_part_csv_path="./user_data/subject/subjects_part.csv",
skip_duplicates=True,
verbose=True,
):
"""
Inserts data from a subject csv into corresponding subject schema tables
By default, uses data from workflow_session/user_data/subject/
:param subject_csv_path: relative path of csv for subject data
:param subject_part_csv_path: relative path of csv for subject part tables
:param skip_duplicates=True: datajoint insert function param
:param verbose: print number inserted (i.e., table length change)
"""
csvs = [
subject_csv_path,
subject_csv_path,
subject_csv_path,
subject_part_csv_path,
subject_part_csv_path,
subject_part_csv_path,
]
tables = [
subject.Subject(),
subject.SubjectDeath(),
subject.SubjectCullMethod(),
subject.Subject.Protocol(),
subject.Subject.User(),
subject.Subject.Lab(),
]
ingest_general(csvs, tables, skip_duplicates=skip_duplicates, verbose=verbose)
def ingest_sessions(
session_csv_path="./user_data/session/sessions.csv",
skip_duplicates=True,
verbose=True,
):
"""
Inserts data from a sessions csv into corresponding session schema tables
By default, uses data from workflow_session/user_data/session/
:param session_csv_path: relative path of session csv
:param skip_duplicates=True: datajoint insert function param
:param verbose: print number inserted (i.e., table length change)
"""
csvs = [
session_csv_path,
session_csv_path,
session_csv_path,
session_csv_path,
session_csv_path,
]
tables = [
session.Session(),
session.SessionDirectory(),
session.SessionNote(),
session.ProjectSession(),
session.SessionExperimenter(),
]
ingest_general(csvs, tables, skip_duplicates=skip_duplicates, verbose=verbose)
if __name__ == "__main__":
ingest_lab()
ingest_subjects()
ingest_sessions()
| 2.140625 | 2 |
etl/parsers/etw/Microsoft_Windows_WMPNSS_PublicAPI.py | IMULMUL/etl-parser | 104 | 86404 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-WMPNSS-PublicAPI
GUID : 614696c9-85af-4e64-b389-d2c0db4ff87b
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=100, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_100_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=101, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_101_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=102, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_102_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=103, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_103_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=104, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_104_0(Etw):
pattern = Struct(
"LibraryName" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=105, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_105_0(Etw):
pattern = Struct(
"LibraryName" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=106, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_106_0(Etw):
pattern = Struct(
"LibraryName" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=107, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_107_0(Etw):
pattern = Struct(
"LibraryName" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=108, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_108_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=109, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_109_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=110, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_110_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=111, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_111_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=112, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_112_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=113, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_113_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=114, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_114_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=115, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_115_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=116, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_116_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=117, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_117_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=118, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_118_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=119, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_119_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=120, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_120_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=121, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_121_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=122, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_122_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=123, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_123_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=124, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_124_0(Etw):
pattern = Struct(
"MACAddress" / WString,
"FriendlyName" / WString,
"Authorize" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=125, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_125_0(Etw):
pattern = Struct(
"MACAddress" / WString,
"FriendlyName" / WString,
"Authorize" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=126, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_126_0(Etw):
pattern = Struct(
"MACAddress" / WString,
"Authorize" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=127, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_127_0(Etw):
pattern = Struct(
"MACAddress" / WString,
"Authorize" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=128, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_128_0(Etw):
pattern = Struct(
"Devices" / Int64ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=129, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_129_0(Etw):
pattern = Struct(
"Devices" / Int64ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=130, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_130_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=131, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_131_0(Etw):
pattern = Struct(
"Enable" / Int8ul,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=132, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_132_0(Etw):
pattern = Struct(
"DeviceID" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=133, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_133_0(Etw):
pattern = Struct(
"DeviceID" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=134, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_134_0(Etw):
pattern = Struct(
"SecurityGroup" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=135, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_135_0(Etw):
pattern = Struct(
"SecurityGroup" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=136, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_136_0(Etw):
pattern = Struct(
"SecurityGroup" / WString,
"HResult" / Int32ul
)
@declare(guid=guid("614696c9-85af-4e64-b389-d2c0db4ff87b"), event_id=137, version=0)
class Microsoft_Windows_WMPNSS_PublicAPI_137_0(Etw):
pattern = Struct(
"SecurityGroup" / WString,
"HResult" / Int32ul
)
| 0.863281 | 1 |
packages/pyre/externals/Library.py | lijun99/pyre | 3 | 86532 | # -*- coding: utf-8 -*-
#
# <NAME>
# orthologue
# (c) 1998-2019 all rights reserved
#
# access to the framework
import pyre
# superclass
from .Package import Package
# my declaration
class Library(Package):
"""
Base class for third party libraries
"""
# user configurable state
defines = pyre.properties.strings()
defines.doc = "the compile time markers that indicate my presence"
incdir = pyre.properties.paths()
incdir.doc = "the locations of my headers; for the compiler command line"
libdir = pyre.properties.paths()
libdir.doc = "the locations of my libraries; for the linker command path"
# end of file
| 1.109375 | 1 |
setup.py | creidinger/discord_messages.py | 0 | 86660 | from setuptools import setup
# source: https://packaging.python.org/tutorials/packaging-projects/
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="discord_messages.py",
version="0.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="A python interface for the Discord API",
long_description=long_description,
long_description_content_type="text/markdown",
py_modules=["discord_messages"],
url="https://github.com/creidinger/discord_messages.py",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.8",
)
| 1.085938 | 1 |
trainer/ATMSKLearnDecisionTreeRegressorGridSearch.py | automatic-dot-ai/automatic | 0 | 86788 | #
# @component {
# "kind" : "trainer",
# "language" : "py",
# "description" : "Performs grid search over the 'hyper' parameter for a decision tree regressor trained on the input data",
# "permissions": "public",
# "properties": [
# { "name": "Hyperparameter" , "field": "hyperparameter", "kind": "string", "minlen": 2, "maxlen": 32, "required": true, "default": "max_depth" },
# { "name": "Min" , "field": "min", "kind": "number", "min": -100000, "max": 100000, "required": true, "default": 1 },
# { "name": "Max" , "field": "max", "kind": "number", "min": -100000, "max": 100000, "required": true, "default": 11 }
# ],
# "inputs": ["X:pandas", "y:pandas"],
# "outputs": ["X:pandas", "y:pandas"],
# "dependencies": ["pandas", "sklearn"],
# "readme" : "",
# "license" : "",
# "links": ["https://www.ritchieng.com/machine-learning-project-boston-home-prices/"]
# }
from sklearn.metrics import r2_score
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
import pandas as pd
def ATMSKLearnDecisionTreeRegressorGridSearch(ATM):
X = ATM.inputs["X"]
y = ATM.inputs["y"]
cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)
regressor = DecisionTreeRegressor(random_state=0)
params = dict
params[ATM.props["hyperparameter"]]=range(ATM.props["min"], ATM.props["max"])
scoring_fnc = make_scorer(performance_metric)
grid = GridSearchCV(regressor, params, cv=cv_sets, scoring=scoring_fnc)
#grid_search = grid_search.fit(X_train, y_train, callbacks=[MyCallback()]) worked for me.
grid = grid.fit(X, y)
model = grid.best_estimator_
ATM.report({ 'name': "stats", 'stats': model.get_params() })
ATM.save("model", pickle.dumps(model, protocol=pickle.HIGHEST_PROTOCOL))
ATM.output({ 'X': X, 'y': y })
""" Calculates and returns the performance score between
true and predicted values based on the metric chosen. """
def performance_metric(y_true, y_predict):
score = r2_score(y_true, y_predict)
return score
| 2.34375 | 2 |
floodsystem/flood.py | hl600/flood_warning_system_38 | 0 | 86916 |
from stationdata import build_station_list, update_water_levels
from utils import sorted_by_key
def stations_level_over_threshold(stations,tol):
list_of_tup = []
for station in stations:
if station.typical_range_consistent() == True:
if station.latest_level != None and station.relative_water_level() > tol:
tup = (station,station.relative_water_level())
list_of_tup.append(tup)
list_of_tup_sorted = sorted_by_key(list_of_tup,1,True)
return list_of_tup_sorted
def stations_highest_rel_level(stations, N):
list_of_tup = []
for station in stations:
if station.typical_range_consistent() == True and station.relative_water_level() != None:
tup = (station,station.relative_water_level())
list_of_tup.append(tup)
list_of_tup_sorted = sorted_by_key(list_of_tup,1,True)[:N]
risk_stations = []
for tup in list_of_tup_sorted:
risk_stations.append(tup[0])
return risk_stations
| 2.109375 | 2 |
eigenloops/eigentools.py | hjrrockies/eigenfun | 0 | 87044 | import numpy as np
import scipy.linalg as la
from progress.bar import IncrementalBar
def eig_trajectories(A,T,verbose=False):
"""Computes the trajectories of the eigenvalues of the
matrix function A(t)
Parameters
----------
A : callable
Matrix-valued function of one parameter t
T : 1d array
Values of the parameter t
Returns
-------
E : ndarray
Array of eigenvalue trajectories where E[i] is the
trajectory of the ith eigenvalue as a 1d array
"""
n,m = A(T[0]).shape
if n!=m:
raise ValueError("Matrix must be square")
m = len(T)
E = np.empty((n,m),dtype="complex")
E[:,0] = la.eig(A(T[0]),right=False)
if verbose: bar = IncrementalBar("Calculating\t", max=m,suffix='%(percent)d%%')
for i,t in enumerate(T[1:]):
w = la.eig(A(t),right=False)
mask = list(range(n))
for eig in w:
idx = np.argmin(np.abs(eig-E[:,i][mask]))
E[mask[idx],i+1] = eig
del mask[idx]
if verbose: bar.next()
if verbose: bar.next(); bar.finish()
return E
def eig_loops(A,U,V,verbose=False):
"""Computes the loops of eigenvalues for the matrix function A(u,v)
Parameters
----------
A : callable
Matrix-valued function of two parameters u,v
U : 1d array
Values of the parameter u
V : 1d array
Values of the parameter v
Returns
-------
L : ndarray
Array of eigenvalue loops where L[i] is a 2d array for the ith eigenvalue.
L[i,j,k] = the ith eigenvalue of A(U[j],V[k])
"""
n,m = A(U[0],V[0]).shape
if n!=m:
raise ValueError("Matrix must be square")
m = len(U)
l = len(V)
L = np.empty((n,m,l),dtype="complex")
B = lambda u: A(u,V[0])
L[:,:,0] = eig_trajectories(B,U)
if verbose: bar = IncrementalBar("Calculating\t", max=m,suffix='%(percent)d%%')
for i,v in enumerate(V[1:]):
B = lambda u: A(u,v)
E = eig_trajectories(B,U)
mask = list(range(n))
for traj in E:
idx = np.argmin(np.abs(traj[0]-L[:,0,i][mask]))
L[mask[idx],:,i+1] = traj
del mask[idx]
if verbose: bar.next()
if verbose: bar.next(); bar.finish()
return L
def eigenvector_trajectories(A, T, verbose=False):
    """Computes the trajectories of the eigenvalues and eigenvectors of the
    matrix-valued function A(t).

    Eigenpairs at consecutive parameter values are matched greedily by
    nearest eigenvalue; each matched eigenvector is sign-flipped so that its
    overlap with the previous step's vector is nonnegative, keeping the
    vector trajectory continuous.

    Parameters
    ----------
    A : callable
        Matrix-valued function of one parameter t; A(t) must be square.
    T : 1d array
        Values of the parameter t.

    Returns
    -------
    E : ndarray
        (n, len(T)) complex array; E[i] is the trajectory of the ith eigenvalue.
    V : ndarray
        (n, n, len(T)) complex array; V[:, i, k] is the ith eigenvector of A(T[k]).

    Raises
    ------
    ValueError
        If A(T[0]) is not square.
    """
    n, m = A(T[0]).shape
    if n != m:
        raise ValueError("Matrix must be square")
    m = len(T)
    E = np.empty((n, m), dtype="complex")
    V = np.empty((n, n, m), dtype="complex")
    E[:, 0], V[:, :, 0] = la.eig(A(T[0]))
    if verbose: bar = IncrementalBar("Calculating\t", max=m, suffix='%(percent)d%%')
    for i, t in enumerate(T[1:]):
        w, v = la.eig(A(t))
        mask = list(range(n))
        for j, eig in enumerate(w):
            idx = np.argmin(np.abs(eig - E[:, i][mask]))
            E[mask[idx], i+1] = eig
            # BUG FIX: scipy pairs eigenvalue w[j] with column v[:, j]; the
            # original indexed v with mask[idx], storing eigenvectors that do
            # not belong to the eigenvalue just matched.
            sign = np.sign((v[:, j] @ V[:, mask[idx], i]).real)
            if sign == 0:
                # Orthogonal overlap: keep the vector rather than zeroing it
                # (np.sign(0) == 0 would have erased the trajectory).
                sign = 1.0
            # NOTE(review): for genuinely complex matrices a conjugated inner
            # product (np.vdot) and full phase alignment would be more
            # appropriate than a real-part sign flip -- TODO confirm use case.
            V[:, mask[idx], i+1] = v[:, j] * sign
            del mask[idx]
        if verbose: bar.next()
    # Extra tick accounts for the initial eigendecomposition.
    if verbose: bar.next(); bar.finish()
    return E, V
| 2.53125 | 3 |
examples/simpleExample2.py | CharlesHahn/DL-PeptideBuilder | 1 | 87172 | <gh_stars>1-10
"""
A simple example script demonstrating how to build peptide by PeptideConstructor in primitive way.
The script generates a peptide of "AKlsDe" in self-defined conformation, and it stores the peptide under the name "simple_example.pdb".
"""
import Bio.PDB
from PeptideConstructor import Geometry
from PeptideConstructor import PeptideBuilder
## create a peptide of "AKlsDe", uppercases mean L amino acids while lowercases indicate D amino acids
## construct the first amino acid
geo = Geometry.geometry("A")
## delete the next two lines to not assign secondary structure
geo.phi = -60
geo.psi_im1 = -50
structure = PeptideBuilder.initialize_res(geo)
## construcet the rest
geo = Geometry.geometry("K")
PeptideBuilder.add_residue(structure, geo)
geo = Geometry.geometry("l")
PeptideBuilder.add_residue(structure, geo)
geo = Geometry.geometry("s")
PeptideBuilder.add_residue(structure, geo)
geo = Geometry.geometry("D")
PeptideBuilder.add_residue(structure, geo)
geo = Geometry.geometry("e")
PeptideBuilder.add_residue(structure, geo)
## add terminal oxygen (OXT) to the final amino acid
## if "NME" capping has been added, NO OXT should be added.
PeptideBuilder.add_terminal_OXT(structure)
## save peptide structure to a pdb file
out = Bio.PDB.PDBIO()
out.set_structure(structure)
out.save("simpleExample.pdb")
| 2.546875 | 3 |
src/model_ops/lenet.py | hwang595/Draco | 21 | 87300 | <gh_stars>10-100
import torch
from torch import nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
from torch.autograd import Variable
from mpi4py import MPI
import sys
from utils import err_simulation
sys.path.insert(0, '../compress_gradient')
from compress_gradient import compress
#SEED_=428
#torch.manual_seed(SEED_)
# we use LeNet here for our simple case
class LeNet(nn.Module):
    """LeNet-style CNN for 28x28 single-channel inputs (e.g. MNIST); 10 logits out."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)
        # Kept for external users even though forward() never calls it
        # (attribute name is a historical typo of "criterion").
        self.ceriation = nn.CrossEntropyLoss()

    def forward(self, x):
        """Return raw class logits for a batch of (N, 1, 28, 28) inputs."""
        # Each stage: convolution, then 2x2 max-pool, then ReLU.
        x = F.relu(F.max_pool2d(self.conv1(x), 2, 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2, 2))
        # Flatten the (N, 50, 4, 4) feature map for the classifier head.
        x = x.view(-1, 4*4*50)
        return self.fc2(self.fc1(x))

    def name(self):
        """Short identifier used by the training harness."""
        return 'lenet'
class LeNetSplit(nn.Module):
    """LeNet whose backward pass is driven manually, stage by stage.

    ``forward`` detaches the activation between every stage (each one is
    re-wrapped in a ``Variable`` with ``requires_grad=True``) so that the
    gradient of each segment can be computed independently and streamed to the
    master over MPI as soon as it is available. Do not use this module for
    normal training: it is a research hack and runs slower than letting
    autograd apply the chain rule end to end.
    """

    def __init__(self):
        super(LeNetSplit, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)
        self.maxpool2d = nn.MaxPool2d(2, stride=2)
        self.relu = nn.ReLU()
        # Only the layers owning parameters; pool/activation layers have none.
        self.full_modules = [self.conv1, self.conv2, self.fc1, self.fc2]
        # One MPI channel per parameter tensor: weight + bias for each layer.
        self._init_channel_index = len(self.full_modules)*2
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, x):
        """Run the net, recording the detached input/output of every stage."""
        self.output = []
        self.input = []
        # Stage 1: conv1
        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.conv1(x)
        self.output.append(x)
        # Stage 2: max-pool
        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.maxpool2d(x)
        self.output.append(x)
        # Stage 3: relu
        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.relu(x)
        self.output.append(x)
        # Stage 4: conv2
        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.conv2(x)
        self.output.append(x)
        # Stage 5: max-pool
        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.maxpool2d(x)
        self.output.append(x)
        # Stage 6: relu
        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.relu(x)
        self.output.append(x)
        # Flatten, then the two fully connected stages.
        x = x.view(-1, 4*4*50)
        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.fc1(x)
        self.output.append(x)
        x = Variable(x.data, requires_grad=True)
        self.input.append(x)
        x = self.fc2(x)
        self.output.append(x)
        return x

    @property
    def fetch_init_channel_index(self):
        """Total number of gradient channels (2 per parameterized layer)."""
        return self._init_channel_index

    def _isend_grad(self, grads, communicator, channel_index, fail_workers, err_mode, compress_grad):
        """Non-blocking send of one gradient array to the master (rank 0).

        Workers listed in ``fail_workers`` send an adversarially perturbed
        gradient (``err_simulation``) instead of the true one. If
        ``compress_grad == 'compress'`` the payload is compressed and shipped
        with the pickle-based ``isend``; otherwise the raw float64 buffer goes
        out via ``Isend``. Returns the MPI request handle.
        """
        if communicator.Get_rank() in fail_workers:
            grads = err_simulation(grad=grads, mode=err_mode)
        if compress_grad == 'compress':
            return communicator.isend(compress(grads), dest=0, tag=88+channel_index)
        return communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)

    def backward_normal(self, g, communicator, req_send_check, cur_step, fail_workers, err_mode, compress_grad):
        """Backprop stage by stage, streaming each gradient to the master.

        ``g`` is the gradient of the loss w.r.t. the final output, and
        ``req_send_check`` must already contain at least one pending request
        (it is waited on before each new send). ``cur_step`` is unused here
        but kept for signature parity with the other ``backward_*`` variants.
        Returns ``req_send_check`` with the new send requests appended.

        The duplicated send/simulate/compress boilerplate of the original
        implementation now lives in ``_isend_grad``; the control flow and the
        send order (bias first, then weight, walking from the last layer to
        the first) are unchanged.
        """
        mod_avail_index = len(self.full_modules)-1
        channel_index = self._init_channel_index - 2
        mod_counters_ = [0]*len(self.full_modules)
        for i, output in reversed(list(enumerate(self.output))):
            # Throttle: wait on the previous send before producing more data.
            req_send_check[-1].wait()
            if i == (len(self.output) - 1):
                # Final stage: seed the backward pass with the loss gradient.
                output.backward(g)
                # NOTE(review): only the last layer's *weight* gradient is sent
                # here (no bias), mirroring the original protocol -- confirm
                # the master side expects exactly 7 tensors.
                tmp_grad = self.full_modules[mod_avail_index].weight.grad
                if not pd.isnull(tmp_grad):
                    grads = tmp_grad.data.numpy().astype(np.float64)
                    req_send_check.append(self._isend_grad(grads, communicator, channel_index, fail_workers, err_mode, compress_grad))
                    mod_avail_index-=1
                    channel_index-=1
            else:
                # Inner stage: propagate the gradient captured on the detached input.
                output.backward(self.input[i+1].grad.data)
                tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
                tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
                if not pd.isnull(tmp_grad_weight) and not pd.isnull(tmp_grad_bias):
                    # Bias is always sent first, then the weight.
                    if mod_counters_[mod_avail_index] == 0:
                        grads = tmp_grad_bias.data.numpy().astype(np.float64)
                        req_send_check.append(self._isend_grad(grads, communicator, channel_index, fail_workers, err_mode, compress_grad))
                        channel_index-=1
                        mod_counters_[mod_avail_index]+=1
                    elif mod_counters_[mod_avail_index] == 1:
                        grads = tmp_grad_weight.data.numpy().astype(np.float64)
                        req_send_check.append(self._isend_grad(grads, communicator, channel_index, fail_workers, err_mode, compress_grad))
                        channel_index-=1
                        mod_counters_[mod_avail_index]+=1
                        mod_avail_index-=1
        # The first layer's weight gradient only becomes available after the
        # loop has finished; flush it now.
        if mod_counters_[0] == 1:
            req_send_check[-1].wait()
            grads = tmp_grad_weight.data.numpy().astype(np.float64)
            req_send_check.append(self._isend_grad(grads, communicator, channel_index, fail_workers, err_mode, compress_grad))
        return req_send_check

    def backward_signal_kill(self, g, communicator, req_send_check, cur_step):
        '''
        This killer is triggered by signals bcasting from master, channel of
        signal is kept checking by each worker to determine if they're the
        straggler
        '''
        mod_avail_index = len(self.full_modules)-1
        channel_index = self._init_channel_index - 2
        mod_counters_ = [0]*len(self.full_modules)
        # should kill flag
        should_kill = False
        for i, output in reversed(list(enumerate(self.output))):
            ############################ killing process on workers #####################################
            # Poll (bounded busy-wait) for a kill signal on tag 77 from the master.
            for _ in range(10000):
                status = MPI.Status()
                communicator.Iprobe(0, 77, status)
                # NOTE(review): rank 0 is also the default value of an untouched
                # Status; relies on Iprobe filling it only on a match -- confirm.
                if status.Get_source() == 0:
                    print("Worker {}, Cur Step: {} I'm the straggler, killing myself!".format(communicator.Get_rank(), cur_step))
                    tmp = communicator.recv(source=0, tag=77)
                    should_kill = True
                    break
            if should_kill:
                break
            ############################################################################################
            if i == (len(self.output) - 1):
                # for last node, use g
                output.backward(g)
                # get gradient here after some sanity checks:
                tmp_grad = self.full_modules[mod_avail_index].weight.grad
                if not pd.isnull(tmp_grad):
                    grads = tmp_grad.data.numpy().astype(np.float64)
                    req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
                    req_send_check.append(req_isend)
                    # update counters
                    mod_avail_index-=1
                    channel_index-=1
                else:
                    continue
            else:
                output.backward(self.input[i+1].grad.data)
                tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
                tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
                if not pd.isnull(tmp_grad_weight) and not pd.isnull(tmp_grad_bias):
                    # we always send bias first
                    if mod_counters_[mod_avail_index] == 0:
                        grads = tmp_grad_bias.data.numpy().astype(np.float64)
                        req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
                        req_send_check.append(req_isend)
                        channel_index-=1
                        mod_counters_[mod_avail_index]+=1
                    elif mod_counters_[mod_avail_index] == 1:
                        grads = tmp_grad_weight.data.numpy().astype(np.float64)
                        req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
                        req_send_check.append(req_isend)
                        channel_index-=1
                        mod_counters_[mod_avail_index]+=1
                        # update counters
                        mod_avail_index-=1
                else:
                    continue
        if mod_counters_[0] == 1:
            grads = tmp_grad_weight.data.numpy().astype(np.float64)
            req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
            req_send_check.append(req_isend)
        return req_send_check

    def backward_timeout_kill(self, g, communicator, req_send_check):
        """Placeholder for a timeout-based straggler kill; never implemented."""
        pass

    def backward_coded(self, g, cur_step):
        """Backprop stage by stage, collecting gradients locally (coded scheme).

        Same walk order as ``backward_normal`` but instead of MPI sends the
        gradient arrays are accumulated and returned as a list (bias before
        weight, last layer first).
        """
        grad_aggregate_list = []
        mod_avail_index = len(self.full_modules)-1
        channel_index = self._init_channel_index - 2
        mod_counters_ = [0]*len(self.full_modules)
        for i, output in reversed(list(enumerate(self.output))):
            if i == (len(self.output) - 1):
                # for last node, use g
                output.backward(g)
            else:
                output.backward(self.input[i+1].grad.data)
                tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
                tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
                # specific for this fc nn setting
                if not pd.isnull(tmp_grad_weight) and not pd.isnull(tmp_grad_bias):
                    # we always send bias first
                    if mod_counters_[mod_avail_index] == 0:
                        grads = tmp_grad_bias.data.numpy().astype(np.float64)
                        grad_aggregate_list.append(grads)
                        channel_index-=1
                        mod_counters_[mod_avail_index]+=1
                    elif mod_counters_[mod_avail_index] == 1:
                        grads = tmp_grad_weight.data.numpy().astype(np.float64)
                        grad_aggregate_list.append(grads)
                        channel_index-=1
                        mod_counters_[mod_avail_index]+=1
                        # update counters
                        mod_avail_index-=1
                else:
                    continue
        if mod_counters_[0] == 1:
            grads = tmp_grad_weight.data.numpy().astype(np.float64)
            grad_aggregate_list.append(grads)
        return grad_aggregate_list
apps/utils/lbs.py | lianxiaopang/camel-store-api | 12 | 87428 | import hashlib
import requests
from haversine import haversine
from requests.exceptions import SSLError, ConnectTimeout, ConnectionError
from django.conf import settings
from django.core.cache import cache
from django.core.files.base import ContentFile
class TencentLBS(object):
    """Client for Tencent Map web services (geocoding + static map images).

    Network failures are swallowed: methods return either a fallback dict
    (defaulting to Shanghai) or a Chinese error string instead of raising.
    """
    # API key taken from Django settings at import time.
    key = settings.TENCENT_LBS_KEY
    geo_coder_url = 'https://apis.map.qq.com/ws/geocoder/v1/'
    static_map_url = 'https://apis.map.qq.com/ws/staticmap/v2/'

    def __init__(self, latitude=None, longitude=None, address=None, icon=None):
        self.latitude = latitude
        self.longitude = longitude
        self.address = address
        self.icon = icon

    def get_location(self, origin=False):
        """Reverse-geocode self.latitude/self.longitude.

        With origin=True the raw JSON response is returned; otherwise a
        {province, city, message} dict (falls back to Shanghai on error).
        """
        params = {
            'key': self.key,
            'location': '{},{}'.format(self.latitude, self.longitude)
        }
        try:
            response = requests.get(self.geo_coder_url, params=params)
            if origin:
                return response.json()
            else:
                return self.parse_location(response)
        except (SSLError, ConnectTimeout, ConnectionError):
            # Fallback payload; message text means "error while resolving region".
            return {
                "province": "上海",
                "city": "上海",
                "message": "获取区域发生错误"
            }

    def get_longitude_and_latitude(self):
        """Forward-geocode self.address; returns {latitude, longitude} or an error string."""
        params = {
            'key': self.key,
            'address': '{}'.format(self.address)
        }
        try:
            response = requests.get(self.geo_coder_url, params=params)
            return self.parse_longitude_and_latitude(response)
        except (SSLError, ConnectTimeout, ConnectionError):
            return "获取坐标位置错误"

    def get_static_map_img(self, size="339*90", zoom=12, icon=None):
        """Fetch a static map image centered on this location.

        Returns a Django ContentFile, or an error string on network failure.
        The marker is drawn slightly below center (see off_degree).
        """
        params = {
            'key': self.key,
            'center': '{},{}'.format(self.latitude, self.longitude),
            'zoom': zoom,
            'size': size,
            'scale': 2  # 2x resolution ("HD")
        }
        # Shift the marker latitude down a little so it sits visually on the
        # center point -- presumably compensates for the marker icon's height;
        # TODO confirm.
        off_number = 0.0055
        latitude = self.off_degree(self.latitude, off_number)
        if icon:
            params['markers'] = "icon:{}|{},{}".format(icon, latitude, self.longitude)
        else:
            params['markers'] = "color:blue|{},{}".format(latitude, self.longitude)
        try:
            response = requests.get(self.static_map_url, params=params)
        except (SSLError, ConnectTimeout, ConnectionError):
            return "保存静态坐标图失败"
        img_file = self.write_image(response.content)
        return img_file

    def write_image(self, img_content):
        """Wrap raw image bytes in a ContentFile named "<lat>_<lng>.png"."""
        file_name = "{}_{}.png".format(self.latitude, self.longitude)
        img_file = ContentFile(content=img_content, name=file_name)
        return img_file

    @staticmethod
    def parse_location(response):
        """Extract province/city from a reverse-geocode response.

        Strips the trailing "市" ("city") suffix from the city name; any
        parse failure yields the Shanghai fallback dict.
        """
        if response.status_code == 200:
            response_data = response.json()
            status = response_data.get('status')
            result = response_data.get('result')
            if status == 0 and result:
                address_component = result.get('address_component', {})
                if address_component.get("city", ""):
                    province = address_component.get('province', '')
                    city = address_component.get('city', '').replace("市", "")
                    return {
                        "province": province,
                        "city": city,
                        "message": "success"
                    }
        return {
            "province": "上海",
            "city": "上海",
            "message": "获取区域发生错误"
        }

    @staticmethod
    def parse_longitude_and_latitude(response):
        """Extract {latitude, longitude} from a forward-geocode response,
        or return an error string embedding the API's message/status code."""
        if response.status_code == 200:
            response_data = response.json()
            status = response_data.get('status')
            result = response_data.get('result')
            if status == 0 and result:
                location = result.get('location', {})
                if all(location.values()):
                    longitude = location.get('lng')
                    latitude = location.get('lat')
                    return {
                        "latitude": latitude,
                        "longitude": longitude
                    }
            return "获取坐标位置错误({})".format(response_data.get("message"))
        return "获取坐标位置错误({})".format(response.status_code)

    @staticmethod
    def off_degree(degree, number):
        # Subtract `number` from `degree` (rounded to 6 decimals) unless the
        # value is already smaller than the offset.
        return degree if degree < number else float("{0:.6f}".format(degree - number))

    def get_address(self):
        """Cached forward-geocode of self.address (12h TTL); error strings are not cached."""
        key = "address-{}".format(self.address)
        data = cache.get(key)
        if not data:
            data = self.get_longitude_and_latitude()
            if not isinstance(data, str):
                cache.set(key, data, 60 * 60 * 12)
        return data
class TencentLBS2(object):
    """Second-generation Tencent Map client: signed requests plus local
    haversine distance computation (the remote distance API is limited to a
    10 km radius, so distances are computed from coordinates instead)."""

    def __init__(self):
        self.key = settings.TENCENT_LBS_KEY
        # Secret key used to sign request parameters (see gen_sig).
        self.sk = settings.TENCENT_LBS_SK

    def gen_sig(self, params):
        """Build the request signature: md5 of the sorted query string
        (path + k=v pairs) concatenated with the secret key."""
        alist = []
        for k in sorted(params.keys()):
            alist.append('='.join((k, params[k])))
        params_str = '/ws/geocoder/v1/?' + '&'.join(alist) + self.sk
        result = hashlib.md5(params_str.encode()).hexdigest()
        return result

    def get_location(self, lat, lng):
        """Reverse-geocode lat/lng; returns a dict (see parse_location) or an
        error string on network failure."""
        url = 'https://apis.map.qq.com/ws/geocoder/v1/'
        params = {
            'key': self.key,
            'location': '{},{}'.format(lat, lng)
        }
        params['sig'] = self.gen_sig(params)
        try:
            response = requests.get(url, params=params)
            return self.parse_location(response)
        except (SSLError, ConnectTimeout, ConnectionError):
            return "message: 获取区域发生错误"

    def one_to_one_distance(self, from_location, to_location):
        '''
        Tencent Map's distance API is limited to a 10 km radius, so for now the
        distance is computed locally from the coordinates (haversine formula).
        :param from_location: {'lat': lat, 'lng': lng}
        :param to_location: {'lat': lat, 'lng': lng}
        :return: distance in km, rounded to 2 decimals
        '''
        from_location = (from_location.get('lat'), from_location.get('lng'))
        to_location = (to_location.get('lat'), to_location.get('lng'))
        distance = haversine(from_location, to_location)
        return round(distance, 2)

    def get_longitude_and_latitude(self, address):
        """Forward-geocode an address; returns {lat, lng} or an error string."""
        url = 'https://apis.map.qq.com/ws/geocoder/v1/'
        params = {
            'key': self.key,
            'address': address,
        }
        params['sig'] = self.gen_sig(params)
        try:
            response = requests.get(url, params=params)
            return self.parse_longitude_and_latitude(response)
        except (SSLError, ConnectTimeout, ConnectionError):
            return "获取坐标位置错误"

    # Remote-API variant kept for reference; superseded by the haversine
    # implementation below because of the 10 km API limit.
    # def one_to_many_distance(self, from_location, to_location):
    #     # 一对多距离计算
    #     '''
    #
    #     :param from_location: 'lat,lng'
    #     :param to_location: ['lat,lng', 'lat,lng',...]
    #     :return:
    #     '''
    #     distance_url = 'https://apis.map.qq.com/ws/distance/v1/'
    #     data = {
    #         'from': from_location,
    #         'to': ';'.join(to_location),
    #         'key': self.key,
    #     }
    #     try:
    #         response = requests.get(distance_url, params=data)
    #         return self.parse_distance(response.json())
    #     except (SSLError, ConnectTimeout, ConnectionError):
    #         return "获取距离信息发生错误"
    def one_to_many_distance(self, from_location, to_location):
        # One-to-many distance computation.
        '''
        Tencent Map's distance API is limited to a 10 km radius, so distances
        are computed locally from coordinates (haversine formula) instead.
        :param from_location: 'lat,lng'
        :param to_location: ['lat,lng', 'lat,lng',...]
        :return: list of {'index', 'distance'} dicts sorted by distance (km)
        '''
        distance_list = []
        from_location = tuple(float(i) for i in from_location.split(','))
        for index, to in enumerate(to_location):
            to_ = tuple(float(i) for i in to.split(','))
            distance = haversine(from_location, to_)
            distance_list.append({'index': index, 'distance': round(distance, 2)})
        distance_list = sorted(distance_list, key=lambda x: x.get('distance'))
        return distance_list

    @staticmethod
    def parse_distance(response):
        """Parse a remote distance-API payload into a distance-sorted list of
        {'index', 'distance'} dicts; used only by the commented-out API path."""
        if response.get('status') == 0:
            result = response.get('result')
            elements = result.get('elements')
            distance_list = []
            for index, element in enumerate(elements):
                # Negative distance marks an unreachable pair -- skip it.
                if element.get('distance') >= 0:
                    distance_list.append({'index': index, 'distance': element.get('distance')})
            distance_list = sorted(distance_list, key=lambda x: x.get('distance'))
            return distance_list
        print(response)
        return f"获取距离信息发生错误:{response.get('message')}"

    @staticmethod
    def parse_location(response):
        """Extract province/city/district (plus their concatenation) from a
        reverse-geocode response; returns an error string on failure."""
        if response.status_code == 200:
            response_data = response.json()
            status = response_data.get('status')
            result = response_data.get('result')
            if status == 0 and result:
                address_component = result.get('address_component', {})
                if address_component.get("city", ""):
                    province = address_component.get('province', '')
                    city = address_component.get('city', '')
                    district = address_component.get('district', '')
                    return {
                        "province": province,
                        "city": city,
                        "district": district,
                        "message": "success",
                        "address": province+city+district
                    }
        return "message: 获取区域发生错误"

    @staticmethod
    def parse_longitude_and_latitude(response):
        """Extract {lat, lng} from a forward-geocode response, or return an
        error string embedding the API message / HTTP status code."""
        if response.status_code == 200:
            response_data = response.json()
            status = response_data.get('status')
            result = response_data.get('result')
            if status == 0 and result:
                location = result.get('location', {})
                if all(location.values()):
                    longitude = location.get('lng')
                    latitude = location.get('lat')
                    return {
                        "lat": latitude,
                        "lng": longitude
                    }
            return "获取坐标位置错误({})".format(response_data.get("message"))
        return "获取坐标位置错误({})".format(response.status_code)
lbs = TencentLBS2() | 1.578125 | 2 |
mediafeed/jobs/__init__.py | media-feed/mediafeed | 0 | 87556 | import os
from logging import getLogger
from simpleworker import WorkerManager
from ..commands import download_media, update_items
from ..settings import DATA_PATH, WORKERS_DOWNLOAD_MEDIA, WORKERS_UPDATE_ITEMS
__all__ = ('job_manager',)
def make_worker(cmd):
    """Build a queue-consuming worker that runs ``cmd(**job)`` for each job.

    The returned callable takes ``(name, queue)``; ``queue`` must provide
    ``get()`` and ``done(job)``. A ``None`` job is the shutdown sentinel.
    Exceptions from ``cmd`` are logged and the job is still acknowledged.
    """
    def worker(name, queue):
        log = getLogger('mediafeed.jobs.%s' % name)
        log.info('Iniciando worker')
        # iter(callable, sentinel): keep pulling jobs until a None arrives.
        for job in iter(queue.get, None):
            try:
                log.info('Executando %r' % job)
                cmd(**job)
            except Exception as e:
                log.error(e)
            # Acknowledge even on failure so the queue does not redeliver.
            queue.done(job)
        log.info('Parando worker')
    return worker
# Module-level job manager: task queues are persisted under DATA_PATH/tasks.
# Each registered queue runs the generic loop from make_worker(); the second
# register() argument is presumably the worker-pool size from settings --
# TODO confirm against simpleworker.WorkerManager.register.
job_manager = WorkerManager(path=os.path.join(DATA_PATH, 'tasks'))
job_manager.register('download_media', WORKERS_DOWNLOAD_MEDIA, make_worker(download_media))
job_manager.register('update_items', WORKERS_UPDATE_ITEMS, make_worker(update_items))
| 1.515625 | 2 |
src/services/tts.py | x2012x/conductor | 4 | 87684 | '''
Created on Apr 3, 2021
@author: x2012x
'''
import logging
from google.cloud import texttospeech
import hashlib
import os
from errors.exceptions import TTSFailure
from services.base import BaseService
from errors.reasons import get_general_failure
from services.audio import PlayRequest
from pathlib import Path
logger = logging.getLogger(__name__)
class TextToSpeechService(BaseService):
    ''' Provides access to TTS service operations.

    Service depends on Google's Text-To-Speech API. Your Google TTS JSON
    config must be staged to 'resources/config/google-tts.json'.
    Synthesized audio is cached on disk (keyed by the SHA-256 of the text)
    so repeated phrases do not hit the API again.

    Arguments:
        conductor (Conductor): reference to the running Conductor instance.
    '''
    def __init__(self, conductor):
        super().__init__(conductor, 'tts')
        # Google TTS client
        self._client = texttospeech.TextToSpeechClient.from_service_account_file('resources/config/google-tts.json')
        # Google TTS voice configuration
        self._voice = texttospeech.VoiceSelectionParams(language_code="en-GB",
                                                        name="en-GB-Standard-F",
                                                        ssml_gender=texttospeech.SsmlVoiceGender.FEMALE)
        # Google Audio file configuration
        self._audio_config = texttospeech.AudioConfig(audio_encoding=texttospeech.AudioEncoding.MP3)
        # Google has a max character limit, service will fail any request that exceeds this limit, before sending to Google.
        self._character_limit = 5000
        # TODO: Need to setup a scheduler to periodically cleanup the cache.
        # TTS audio is cached in this directory for reuse. If the same phrase is being
        # requested, the service will play from the cache instead of sending a request to Google.
        self._cache = 'resources/cache/tts_cache'
        Path(self._cache).mkdir(parents=True, exist_ok=True)

    def speak_response(self, response):
        ''' Speak the supplied response object.

        Arguments:
            response (Response): Response object to speak; its speech text,
                background audio track and volume shift are forwarded to speak().
        '''
        self.speak(response.speech.text, response.background_audio, response.background_volume_shift)

    def speak(self, text_content, background_audio = None, background_volume_shift = 20):
        ''' Speak the supplied text_content while playing the optional background_audio track.

        text_content will be sent to the Google TTS service. Once the TTS audio file is
        received from Google, an AudioService PlayRequest is constructed to process the returned audio
        file and the optional background track.

        Arguments:
            text_content (str): text to send to Google TTS.
            background_audio (str): path to audio file to play as background audio track
            background_volume_shift (int): amount of volume decrease that should be used for the
                background track

        Raises:
            TTSFailure: if the text exceeds the character limit or synthesis/playback fails.
        '''
        # Check for Google TTS max characters before transmitting request
        if len(text_content) > self._character_limit:
            logger.error('Text to speak exceeds TTS character limit')
            raise TTSFailure(get_general_failure())
        try:
            # Get hash of the text_content being spoken.
            text_hash = hashlib.sha256(text_content.encode('utf-8')).hexdigest()
            # Use that hash to construct filenames for the text and audio file that will be stored in cache
            audio_file = os.path.join(self._cache, f'{text_hash}.mp3')
            text_file = os.path.join(self._cache, f'{text_hash}.txt')
            # If the current text doesn't exist in cache, send request to Google.
            if not os.path.exists(audio_file):
                synthesis_input = texttospeech.SynthesisInput(text=text_content)
                response = self._client.synthesize_speech(input=synthesis_input, voice=self._voice, audio_config=self._audio_config)
                with open(audio_file, "wb") as out:
                    out.write(response.audio_content)
                logger.debug(f'Created new recording at {audio_file}')
                # Keep a plain-text transcription next to the mp3 for debugging.
                with open(text_file, "w") as out:
                    out.write(text_content)
                logger.debug(f'Created transcription at {text_file}')
            else:
                logger.info(f'Playing audio from cache {audio_file}')
            # Send a PlayRequest to the audio service to play the TTS audio and optional background track.
            self.conductor.audio.play(PlayRequest(audio_file, background_audio, background_volume_shift = background_volume_shift))
        except Exception:
            # NOTE(review): the original exception is discarded; consider
            # "raise TTSFailure(...) from e". Also, this path passes the spoken
            # text as the failure payload while the character-limit path passes
            # a failure reason -- looks inconsistent, confirm intent.
            raise TTSFailure(text_content)
| 1.703125 | 2 |
example_site/address/__init__.py | gnipp/django-address | 0 | 87812 | default_app_config = "address.apps.AddressConfig"
| -0.096191 | 0 |
core/utils/iou.py | FMsunyh/keras-fpn | 7 | 87940 | <reponame>FMsunyh/keras-fpn<filename>core/utils/iou.py
#!/usr/bin/python3
"""
Copyright 2018-2019 Firmin.Sun (<EMAIL>)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -----------------------------------------------------
# @Time : 12/14/2018 11:10 AM
# @Author : Firmin.Sun (<EMAIL>)
# @Software: ZJ_AI
# -----------------------------------------------------
# -*- coding: utf-8 -*-
import keras
import tensorflow
def overlapping(anchors, gt_boxes):
    """
    Overlaps between the anchors and the gt boxes
    :param anchors: Generated anchors, assumed (N, 4) box tensor -- TODO confirm
    :param gt_boxes: Ground truth bounding boxes, assumed (K, 4) -- TODO confirm
    :return: (argmax_overlaps_inds, max_overlaps, gt_argmax_overlaps_inds)
             - argmax_overlaps_inds: (N,) index of the best gt box per anchor
             - max_overlaps: (N,) overlap of each anchor with that best gt box
             - gt_argmax_overlaps_inds: (K,) index of the best anchor per gt box
    """
    # assert keras.backend.ndim(anchors) == 2
    # assert keras.backend.ndim(gt_boxes) == 2
    # (N, K) pairwise overlap matrix between anchors and gt boxes.
    reference = compute_overlap(anchors, gt_boxes)
    # Best anchor for each gt box (argmax down the anchor axis).
    gt_argmax_overlaps_inds = keras.backend.argmax(reference, axis=0)
    # Best gt box for each anchor (argmax across the gt axis).
    argmax_overlaps_inds = keras.backend.argmax(reference, axis=1)
    # Build (N, 2) [row, col] index pairs so gather_nd can pull each anchor's
    # maximal overlap value out of the (N, K) matrix.
    indices = keras.backend.stack([
        tensorflow.range(keras.backend.shape(anchors)[0]),
        keras.backend.cast(argmax_overlaps_inds, "int32")
    ], axis=0)
    indices = keras.backend.transpose(indices)
    max_overlaps = tensorflow.gather_nd(reference, indices)
    return argmax_overlaps_inds, max_overlaps, gt_argmax_overlaps_inds
def compute_overlap(a, b):
    """
    :param a: (N, 4) ndarray of float, boxes as (x1, y1, x2, y2)
    :param b: (K, 4) ndarray of float, boxes as (x1, y1, x2, y2)
    :return: overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    # Areas of the K query boxes. The "+ 1" treats coordinates as inclusive
    # pixel indices -- assumed convention of the annotations, TODO confirm.
    area = (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)
    # Pairwise intersection widths/heights via broadcasting: (N, 1) vs (K,) -> (N, K).
    iw = keras.backend.minimum(keras.backend.expand_dims(a[:, 2], 1), b[:, 2]) - keras.backend.maximum(keras.backend.expand_dims(a[:, 0], 1), b[:, 0]) + 1
    ih = keras.backend.minimum(keras.backend.expand_dims(a[:, 3], 1), b[:, 3]) - keras.backend.maximum(keras.backend.expand_dims(a[:, 1], 1), b[:, 1]) + 1
    # Clamp to zero for disjoint boxes (negative widths/heights mean no overlap).
    iw = keras.backend.maximum(iw, 0)
    ih = keras.backend.maximum(ih, 0)
    # Union area = area(a) + area(b) - intersection, floored at a small
    # epsilon to avoid division by zero for degenerate boxes.
    ua = keras.backend.expand_dims((a[:, 2] - a[:, 0] + 1) * (a[:, 3] - a[:, 1] + 1), 1) + area - iw * ih
    ua = keras.backend.maximum(ua, 0.0001)
    intersection = iw * ih
    # IoU = intersection / union.
    return intersection / ua
| 1.726563 | 2 |
EDMScripts/ManualStateSelector.py | jstammers/EDMSuite | 6 | 88068 | from System import Random
rand = Random()
def randomBool():
    # Draw 0 or 1 from the shared .NET Random and map it to a literal string.
    return "True" if rand.Next(0, 2) < 1 else "False"
def run_script():
    # Print a tuple-like triple of random boolean strings, e.g. "(True, False, True)".
    print("(" + ", ".join([randomBool(), randomBool(), randomBool()]) + ")")
| 1.476563 | 1 |
trellab/__init__.py | Edinburgh-Genome-Foundry/trellab | 2 | 88196 | <filename>trellab/__init__.py
""" dna_sequencing_viewer/__init__.py """
# __all__ = []
from .trellab import TrellabOrganizationClient | 0.259766 | 0 |
main.py | dertilo/pytorch-lightning-sagemaker | 0 | 88324 | import json
import boto3
import sagemaker
import wandb
from sagemaker.pytorch import PyTorch
# based on: https://github.com/aletheia/mnist_pl_sagemaker/blob/master/main.py
# Training code (and the wandb credentials file written below) live in ./code.
source_dir = 'code'
# Stage the local wandb API key into source_dir so the training job can log to W&B.
wandb.sagemaker_auth(path=source_dir)

sagemaker_session = sagemaker.Session()
# bucket_name = sagemaker_session.default_bucket()
# Hard-coded output prefix of a previous training run, used as the input channel below.
bucket_name = "sagemaker-eu-central-1-706022464121/pytorch-training-2020-10-26-19-02-00-900/output"
bucket = f's3://{bucket_name}'

# Hard-coded execution role; in a notebook one could use sagemaker.get_execution_role().
role = 'arn:aws:iam::706022464121:role/SageMakerRole_MNIST' # sagemaker.get_execution_role()

estimator = PyTorch(
    entry_point='train.py',
    source_dir=source_dir,
    role=role,
    framework_version='1.4.0',
    py_version="py3",
    instance_count=1,
    # instance_type="local",# 'ml.p2.xlarge',
    instance_type="ml.c5.xlarge",#"ml.g4dn.xlarge",# 'ml.p2.xlarge',
    # Spot training: cheaper, but the job may be interrupted; max_wait bounds
    # total wall time (run + interruptions).
    use_spot_instances = True,
    max_wait = 24 * 60 * 60, # seconds; see max_run
    # checkpoint_s3_uri = ... #TODO(tilo)
    hyperparameters={
        'max_epochs': 2,
        'batch_size': 32,
    })

# Launch training with the S3 prefix above as the input data channel.
estimator.fit(f"{bucket}")

# Instance types available in this region, kept for reference:
# [ml.p2.xlarge, ml.m5.4xlarge, ml.m4.16xlarge, ml.c5n.xlarge, ml.p3.16xlarge, ml.m5.large, ml.p2.16xlarge, ml.c4.2xlarge, ml.c5.2xlarge, ml.c4.4xlarge, ml.c5.4xlarge, ml.c5n.18xlarge, ml.g4dn.xlarge, ml.g4dn.12xlarge, ml.c4.8xlarge, ml.g4dn.2xlarge, ml.c5.9xlarge, ml.g4dn.4xlarge, ml.c5.xlarge, ml.g4dn.16xlarge, ml.c4.xlarge, ml.g4dn.8xlarge, ml.c5n.2xlarge, ml.c5n.4xlarge, ml.c5.18xlarge, ml.p3dn.24xlarge, ml.p3.2xlarge, ml.m5.xlarge, ml.m4.10xlarge, ml.c5n.9xlarge, ml.m5.12xlarge, ml.m4.xlarge, ml.m5.24xlarge, ml.m4.2xlarge, ml.p2.8xlarge, ml.m5.2xlarge, ml.p3.8xlarge, ml.m4.4xlarge]
Enigma/Enigma-master/Classical_Gan/classical_gan.py | Q-Alpha/Hackathon2020 | 12 | 88452 | import tensorflow as tf
import numpy as np
import random
import time
import os
from tensorflow.keras import layers
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
#The Generator Model
def generator_model(n):
    """Build the DCGAN-style generator.

    Maps a 100-dim noise vector to a (2n, 2n, 1) map in [-1, 1] (tanh):
    Dense -> reshape to (n, n, 256) -> stride-1 transposed conv -> stride-2
    transposed conv (doubles the spatial size) -> 1-channel tanh output.

    :param n: base spatial resolution; the output is 2n x 2n.
    :return: an uncompiled tf.keras Sequential model.
    """
    model = tf.keras.Sequential()
    model.add(layers.Dense(n*n*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((n, n, 256)))
    assert model.output_shape == (None, n, n, 256) # Note: None is the batch size
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert model.output_shape == (None, n, n, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 2*n, 2*n, 64)
    model.add(layers.BatchNormalization())
    # NOTE(review): plain ReLU here while the earlier blocks use LeakyReLU --
    # possibly unintentional; confirm before "fixing".
    model.add(layers.ReLU())
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(1, 1), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 2*n, 2*n, 1)
    return model
def discriminator_model():
    # Builds the discriminator: two strided convolutions over a fixed
    # (4, 4, 1) input (i.e. the generator's output with default n=2),
    # ending in a single unactivated logit (paired with
    # BinaryCrossentropy(from_logits=True) above).
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                                     input_shape=[4, 4, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    # NOTE(review): plain ReLU here vs LeakyReLU in the first block --
    # confirm whether this asymmetry is intended.
    model.add(layers.ReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Flatten())
    model.add(layers.Dense(1))
    return model
#Loss and Optimizer
def D_loss(real_output, fake_output):
    """Discriminator loss: BCE of real scores against ones plus BCE of
    fake scores against zeros."""
    loss_on_real = cross_entropy(tf.ones_like(real_output), real_output)
    loss_on_fake = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return loss_on_real + loss_on_fake
def G_loss(fake_output):
    """Generator loss: how far the discriminator's fake scores are from 1."""
    all_real_labels = tf.ones_like(fake_output)
    return cross_entropy(all_real_labels, fake_output)
#Training Functions
def train_step(adj_matrix, n = 2):
    """Run one generator/discriminator update on a batch of real matrices.

    :param adj_matrix: batch of real matrices fed to the discriminator
        (assumed shape (batch, 2n, 2n, 1) -- TODO confirm with callers).
    :param n: half the side length of generated matrices (default 2).
    """
    # NOTE(review): both networks (and the checkpoint object) are rebuilt
    # from scratch on every call, so successive calls do not accumulate
    # training progress; hoisting them to module scope is almost certainly
    # the intent -- confirm before restructuring callers.
    generator = generator_model(n)
    discriminator = discriminator_model()
    noise_dim = 100
    num_of_generated_examples = 16
    # BUG FIX: this statement was missing its closing "])", which made the
    # entire module fail to parse (SyntaxError).
    seed = tf.random.normal([num_of_generated_examples, noise_dim])
    noise = tf.random.normal([100, noise_dim])
    checkpoint_dir = './training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                     discriminator_optimizer=discriminator_optimizer,
                                     generator=generator,
                                     discriminator=discriminator)
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # Forward pass: generate fakes, score reals and fakes.
        generated_matrix = generator(noise, training=True)
        real_output = discriminator(adj_matrix, training=True)
        fake_output = discriminator(generated_matrix, training=True)
        gen_loss = G_loss(fake_output)
        disc_loss = D_loss(real_output, fake_output)
    # Backward pass: apply each loss's gradients to its own network.
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
#Train
def train_GAN(dataset, epochs, path = "/"):
    """Train the GAN for *epochs* passes over *dataset*.

    :param dataset: iterable of batches of real matrices.
    :param epochs: number of full passes over the dataset.
    :param path: kept for backward compatibility; it was previously meant
        as the destination for generator.save() (see note below).
    """
    for epoch in range(epochs):
        start = time.time()
        for batch in dataset:
            train_step(batch)
        # BUG FIX: the original called `checkpoint.save(file_prefix =
        # checkpoint_prefix)` every 15 epochs and `generator.save(path)`
        # after the loop, but `checkpoint`, `checkpoint_prefix` and
        # `generator` are locals of train_step and were never in scope
        # here, so both statements raised NameError at runtime. They are
        # removed until the models/checkpoint are hoisted to module scope.
        print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
| 1.984375 | 2 |
Desktop10.4.1/python/osgeo/__init__.py | Esri/raster2gpkg | 13 | 88580 | # __init__ for osgeo package.
# making the osgeo package version the same as the gdal version:
from sys import version_info
if version_info >= (2,6,0):
    # On Python >= 2.6, locate the SWIG-generated _gdal extension that lives
    # next to this package and load it explicitly.
    # NOTE(review): the `imp` module is deprecated since Python 3.4 and
    # removed in 3.12 -- this path only works on older interpreters.
    def swig_import_helper():
        # Look for _gdal in this package's own directory; fall back to a
        # regular import (e.g. from sys.path) if it is not found there.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_gdal', [dirname(__file__)])
        except ImportError:
            import _gdal
            return _gdal
        if fp is not None:
            try:
                _mod = imp.load_module('_gdal', fp, pathname, description)
            finally:
                # Always release the file handle opened by find_module.
                fp.close()
            return _mod
    _gdal = swig_import_helper()
    del swig_import_helper
else:
    import _gdal
# Mirror the GDAL release name as this package's version string.
__version__ = _gdal.__version__ = _gdal.VersionInfo("RELEASE_NAME")
| 0.996094 | 1 |
mvc/models/base.py | PyXRD/pyxrd | 27 | 88708 | <filename>mvc/models/base.py
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# -------------------------------------------------------------------------
# Copyright (C) 2014 by <NAME> <mathijs dot dumon at gmail dot com>
# Copyright (C) 2005 by <NAME> <<EMAIL>>
#
# mvc is a framework derived from the original pygtkmvc framework
# hosted at: <http://sourceforge.net/projects/pygtkmvc/>
#
# mvc is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# mvc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110, USA.
# -------------------------------------------------------------------------
import inspect
import logging
from mvc.support.gui_loop import add_idle_call
logger = logging.getLogger(__name__)
from weakref import WeakKeyDictionary
try:
import threading as threading
except ImportError:
import dummy_threading as threading
try:
from fastrlock.rlock import FastRLock as RLock
except ImportError:
from threading import RLock
from ..support.collections.weak_list import WeakList
from ..support.observables import ObsWrapperBase, Signal
from ..observers import Observer, NTInfo
from .metaclasses import ModelMeta
from .properties import UUIDProperty
class Model(Observer, metaclass=ModelMeta):
    """
    .. attribute:: __observables__
    Class attribute. A list or tuple of name strings. The metaclass
    :class:`~mvc.support.metaclasses.ObservablePropertyMeta`
    uses it to create properties.
    *Value properties* have to exist as an attribute with an
    initial value, which may be ``None``.
    *Logical properties* require a getter and may have a setter method in
    the class.
    """
    # NOTE(review): the string below is a bare expression, not the class
    # docstring (the docstring slot is taken by the string above); it is
    # kept verbatim as in-source documentation.
    """A base class for models whose observable properties can be
    changed by threads different than the (gtk) main thread. Notification is
    performed by exploiting the gtk idle loop only if needed,
    otherwise the standard notification system (direct method call) is
    used. In this model, the observer is expected to run in the gtk
    main loop thread."""
    class Meta(object):
        """
        A meta-data class providing some basic functionality
        """
        @classmethod
        def get_column_properties(cls):
            # Cached list of (label, data_type) pairs for all properties
            # flagged as tabular; computed once and memoized on the class.
            if not hasattr(cls, "all_properties"):
                raise RuntimeError("Meta class '%s' has not been initialized" \
                    " properly: 'all_properties' is not set!" % type(cls))
            else:
                cls._mem_columns = getattr(cls, "_mem_columns", None)
                if cls._mem_columns is None:
                    cls._mem_columns = [(attr.label, attr.data_type) for attr in cls.all_properties if attr.tabular]
                return cls._mem_columns
        @classmethod
        def get_local_persistent_properties(cls):
            # Properties declared directly on this class that are persisted.
            return [attr for attr in cls.properties if attr.persistent]
        @classmethod
        def get_viewless_properties(cls):
            # Properties that should not be shown in a view.
            # NOTE(review): the error branch references `self`, which does
            # not exist in a classmethod -- if this branch ever runs it
            # raises NameError instead; should presumably be type(cls) as in
            # get_column_properties above.
            if not hasattr(cls, "all_properties"):
                raise RuntimeError("Meta class '%s' has not been initialized" \
                    " properly: 'all_properties' is not set!" % type(self))
            else:
                return [attr for attr in cls.all_properties if not attr.visible]
        @classmethod
        def get_viewable_properties(cls):
            # Properties that may be shown in a view.
            # NOTE(review): same `type(self)` issue as in
            # get_viewless_properties.
            if not hasattr(cls, "all_properties"):
                raise RuntimeError("Meta class '%s' has not been initialized" \
                    " properly: 'all_properties' is not set!" % type(self))
            else:
                return [attr for attr in cls.all_properties if attr.visible]
        pass # end of class
    uuid = UUIDProperty(persistent=True, observable=False)
    # ------------------------------------------------------------
    #      Initialization and other internals
    # ------------------------------------------------------------
    def __init__(self, *args, **kwargs):
        """Set up per-instance notification bookkeeping and register every
        property declared in ``Meta.all_properties``."""
        super(Model, self).__init__(*args, **kwargs)
        self._prop_lock = RLock() # @UndefinedVariable
        self.__observers = WeakList()
        # Maps each observer to the thread it registered from, so
        # notifications can be routed to the right thread later on.
        self.__observer_threads = WeakKeyDictionary()
        # keys are properties names, values are pairs (method,
        # kwargs|None) inside the observer. kwargs is the keyword
        # argument possibly specified when explicitly defining the
        # notification method in observers, and it is used to build
        # the NTInfo instance passed down when the notification method
        # is invoked. If kwargs is None (special case), the
        # notification method is "old style" (property_<name>_...) and
        # won't be receiving the property name.
        self.__value_notifications = {}
        self.__instance_notif_before = {}
        self.__instance_notif_after = {}
        self.__signal_notif = {}
        for attr in self.Meta.all_properties: self.register_property(attr)
        return
    def register_property(self, prop):
        """Registers an existing attribute to be monitored, and sets
        up notifiers for notifications"""
        if prop.label not in self.__value_notifications:
            self.__value_notifications[prop.label] = []
            pass
        # registers observable wrappers
        propval = getattr(type(self), prop.label)._get(self)
        if isinstance(propval, ObsWrapperBase):
            propval.__add_model__(self, prop.label)
            # Signals get their own notification table; other wrappers
            # (containers etc.) get before/after mutation tables.
            if isinstance(propval, Signal):
                if prop.label not in self.__signal_notif:
                    self.__signal_notif[prop.label] = []
                    pass
                pass
            else:
                if prop.label not in self.__instance_notif_before:
                    self.__instance_notif_before[prop.label] = []
                    pass
                if prop.label not in self.__instance_notif_after:
                    self.__instance_notif_after[prop.label] = []
                    pass
                pass
            pass
        return
    def has_property(self, label):
        """Returns true if given property name refers an observable
        property inside self or inside derived classes."""
        # NOTE(review): returns None (falsy) rather than False when no
        # property matches.
        for prop in self.Meta.all_properties:
            if prop.label == label:
                return True
    def register_observer(self, observer):
        """Register given observer among those observers which are
        interested in observing the model."""
        if observer in self.__observers: return # not already registered
        assert isinstance(observer, Observer)
        self.__observers.append(observer)
        # Remember the registering thread: notifications from other threads
        # will be routed through the idle loop (see __notify_observer__).
        self.__observer_threads[observer] = threading.current_thread() # @UndefinedVariable
        for prop in self.Meta.all_properties:
            self.__add_observer_notification(observer, prop)
            pass
        return
    def unregister_observer(self, observer):
        """Unregister the given observer that is no longer interested
        in observing the model."""
        assert isinstance(observer, Observer)
        if observer not in self.__observers: return
        for prop in self.Meta.all_properties:
            self.__remove_observer_notification(observer, prop)
            pass
        self.__observers.remove(observer)
        del self.__observer_threads[observer]
        return
    def _reset_property_notification(self, prop, old=None):
        """Called when an assignment has been done that changes the
        type of a property or the instance of the property has been
        changed to a different instance. In this case it must be
        unregistered and registered again. Optional parameter old has
        to be used when the old value is an instance (derived from
        ObsWrapperBase) which needs to unregistered from the model, via
        a call to method old.__remove_model__(model, prop_name)"""
        # unregister_property
        if isinstance(old, ObsWrapperBase):
            old.__remove_model__(self, prop.label)
            pass
        self.register_property(prop)
        # Re-wire every observer's notifications for this property.
        for observer in self.__observers:
            self.__remove_observer_notification(observer, prop)
            self.__add_observer_notification(observer, prop)
            pass
        return
    def __add_observer_notification(self, observer, prop):
        """
        Find observing methods and store them for later notification.
        *observer* an instance.
        *label* a string.
        This checks for magic names as well as methods explicitly added through
        decorators or at runtime. In the latter case the type of the notification
        is inferred from the number of arguments it takes.
        """
        value = getattr(type(self), prop.label)._get(self)
        # --- Some services ---
        def getmeth(format, numargs): # @ReservedAssignment
            # Resolve the magic-named method on the observer and verify its
            # arity; raises AttributeError if absent or mismatched.
            # NOTE(review): inspect.getargspec was removed in Python 3.11
            # (use getfullargspec) and logger.warn is a deprecated alias of
            # logger.warning -- this code predates both changes.
            name = format % prop.label
            meth = getattr(observer, name)
            args, varargs, _, _ = inspect.getargspec(meth)
            if not varargs and len(args) != numargs:
                logger.warn("Ignoring notification %s: exactly %d arguments"
                            " are expected", name, numargs)
                raise AttributeError
            return meth
        def add_value(notification, kw=None):
            # Record a value-assignment notification (deduplicated).
            pair = (notification, kw)
            if pair in self.__value_notifications[prop.label]: return
            logger.debug("Will call %s.%s after assignment to %s.%s",
                         observer.__class__.__name__, notification.__name__,
                         self.__class__.__name__, prop.label)
            self.__value_notifications[prop.label].append(pair)
            return
        def add_before(notification, kw=None):
            # Record a before-mutation notification; only meaningful for
            # observable wrappers that are not Signals.
            if (not isinstance(value, ObsWrapperBase) or
                isinstance(value, Signal)):
                return
            pair = (notification, kw)
            if pair in self.__instance_notif_before[prop.label]: return
            logger.debug("Will call %s.%s before mutation of %s.%s",
                         observer.__class__.__name__, notification.__name__,
                         self.__class__.__name__, prop.label)
            self.__instance_notif_before[prop.label].append(pair)
            return
        def add_after(notification, kw=None):
            # Record an after-mutation notification; same restriction as
            # add_before.
            if (not isinstance(value, ObsWrapperBase) or
                isinstance(value, Signal)):
                return
            pair = (notification, kw)
            if pair in self.__instance_notif_after[prop.label]: return
            logger.debug("Will call %s.%s after mutation of %s.%s",
                         observer.__class__.__name__, notification.__name__,
                         self.__class__.__name__, prop.label)
            self.__instance_notif_after[prop.label].append(pair)
            return
        def add_signal(notification, kw=None):
            # Record a signal-emission notification; only for Signal values.
            if not isinstance(value, Signal): return
            pair = (notification, kw)
            if pair in self.__signal_notif[prop.label]: return
            logger.debug("Will call %s.%s after emit on %s.%s",
                         observer.__class__.__name__, notification.__name__,
                         self.__class__.__name__, prop.label)
            self.__signal_notif[prop.label].append(pair)
            return
        # ---------------------
        # Magic ("old style") notification method names, tried in turn;
        # getmeth raises AttributeError when the method is missing or has
        # the wrong arity, which simply skips that registration.
        try: notification = getmeth("property_%s_signal_emit", 3)
        except AttributeError: pass
        else: add_signal(notification)
        try: notification = getmeth("property_%s_value_change", 4)
        except AttributeError: pass
        else: add_value(notification)
        try: notification = getmeth("property_%s_before_change", 6)
        except AttributeError: pass
        else: add_before(notification)
        try: notification = getmeth("property_%s_after_change", 7)
        except AttributeError: pass
        else: add_after(notification)
        # here explicit notification methods are handled (those which
        # have been statically or dynamically registered)
        type_to_adding_method = {
            'assign' : add_value,
            'before' : add_before,
            'after'  : add_after,
            'signal' : add_signal,
            }
        for meth in observer.get_observing_methods(prop.label):
            added = False
            kw = observer.get_observing_method_kwargs(prop.label, meth)
            for flag, adding_meth in type_to_adding_method.items():
                if flag in kw:
                    added = True
                    adding_meth(meth, kw)
                    pass
                pass
            if not added: raise ValueError("In %s notification method %s is "
                                           "marked to be observing property "
                                           "'%s', but no notification type "
                                           "information were specified." %
                                           (observer.__class__,
                                            meth.__name__, prop.label))
            pass
        return
    def __remove_observer_notification(self, observer, prop):
        """
        Remove all stored notifications.
        *observer* an instance.
        *prop* a LabeledProperty instance.
        """
        # Generator with a side effect: removes the observer's entries from
        # *seq* while yielding the removed methods for logging.
        def side_effect(seq):
            for meth, kw in reversed(seq):
                if meth.__self__ is observer:
                    seq.remove((meth, kw))
                    yield meth
        for meth in side_effect(self.__value_notifications.get(prop.label, ())):
            logger.debug("Stop calling '%s' after assignment", meth.__name__)
        for meth in side_effect(self.__signal_notif.get(prop.label, ())):
            logger.debug("Stop calling '%s' after emit", meth.__name__)
        for meth in side_effect(self.__instance_notif_before.get(prop.label, ())):
            logger.debug("Stop calling '%s' before mutation", meth.__name__)
        for meth in side_effect(self.__instance_notif_after.get(prop.label, ())):
            logger.debug("Stop calling '%s' after mutation", meth.__name__)
        return
    def __notify_observer__(self, observer, method, *args, **kwargs):
        """This makes a call either through the Gtk.idle list or a
        direct method call depending whether the caller's thread is
        different from the observer's thread"""
        # NOTE(review): threading.currentThread() is a deprecated alias of
        # threading.current_thread().
        assert observer in self.__observer_threads
        if threading.currentThread() == self.__observer_threads[observer]: # @UndefinedVariable
            # Caller runs in the observer's own thread: call directly.
            self.__idle_notify_observer(observer, method, args, kwargs)
        else:
            # Cross-thread notification: defer the call to the GUI idle loop
            # so it executes in the observer's (main) thread.
            add_idle_call(self.__idle_notify_observer, observer, method, args, kwargs)
    def __idle_notify_observer(self, observer, method, args, kwargs):
        # Actual invocation of the observer's notification method.
        method(*args, **kwargs)
    # -------------------------------------------------------------
    #            Notifiers:
    # -------------------------------------------------------------
    def notify_property_value_change(self, prop_name, old, new):
        """
        Send a notification to all registered observers.
        *old* the value before the change occured.
        """
        assert(prop_name in self.__value_notifications)
        for method, kw in self.__value_notifications[prop_name] :
            obs = method.__self__
            # notification occurs checking spuriousness of the observer
            if old != new or obs.accepts_spurious_change():
                if kw is None: # old style call without name
                    self.__notify_observer__(obs, method,
                                             self, old, new)
                elif 'old_style_call' in kw: # old style call with name
                    self.__notify_observer__(obs, method,
                                             self, prop_name, old, new)
                else:
                    # New style explicit notification.
                    # notice that named arguments overwrite any
                    # existing key:val in kw, which is precisely what
                    # it is expected to happen
                    info = NTInfo('assign',
                                  kw, model=self, prop_name=prop_name,
                                  old=old, new=new)
                    self.__notify_observer__(obs, method,
                                             self, prop_name, info)
                    pass
                pass
            pass
        return
    def notify_method_before_change(self, prop_name, instance, meth_name,
                                    args, kwargs):
        """
        Send a notification to all registered observers.
        *instance* the object stored in the property.
        *meth_name* name of the method we are about to call on *instance*.
        """
        assert(prop_name in self.__instance_notif_before)
        for method, kw in self.__instance_notif_before[prop_name]:
            obs = method.__self__
            # notifies the change
            if kw is None: # old style call without name
                self.__notify_observer__(obs, method,
                                         self, instance,
                                         meth_name, args, kwargs)
            elif 'old_style_call' in kw: # old style call with name
                self.__notify_observer__(obs, method,
                                         self, prop_name, instance,
                                         meth_name, args, kwargs)
            else:
                # New style explicit notification.
                # notice that named arguments overwrite any
                # existing key:val in kw, which is precisely what
                # it is expected to happen
                info = NTInfo('before',
                              kw,
                              model=self, prop_name=prop_name,
                              instance=instance, method_name=meth_name,
                              args=args, kwargs=kwargs)
                self.__notify_observer__(obs, method,
                                         self, prop_name, info)
                pass
            pass
        return
    def notify_method_after_change(self, prop_name, instance, meth_name,
                                   res, args, kwargs):
        """
        Send a notification to all registered observers.
        *args* the arguments we just passed to *meth_name*.
        *res* the return value of the method call.
        """
        assert(prop_name in self.__instance_notif_after)
        for method, kw in self.__instance_notif_after[prop_name]:
            obs = method.__self__
            # notifies the change
            if kw is None: # old style call without name
                self.__notify_observer__(obs, method,
                                         self, instance,
                                         meth_name, res, args, kwargs)
            elif 'old_style_call' in kw: # old style call with name
                self.__notify_observer__(obs, method,
                                         self, prop_name, instance,
                                         meth_name, res, args, kwargs)
            else:
                # New style explicit notification.
                # notice that named arguments overwrite any
                # existing key:val in kw, which is precisely what
                # it is expected to happen
                info = NTInfo('after',
                              kw,
                              model=self, prop_name=prop_name,
                              instance=instance, method_name=meth_name,
                              result=res, args=args, kwargs=kwargs)
                self.__notify_observer__(obs, method,
                                         self, prop_name, info)
                pass
            pass
        return
    def notify_signal_emit(self, prop_name, arg):
        """
        Emit a signal to all registered observers.
        *prop_name* the property storing the :class:`~mvc.observable.Signal`
        instance.
        *arg* one arbitrary argument passed to observing methods.
        """
        assert(prop_name in self.__signal_notif)
        for method, kw in self.__signal_notif[prop_name]:
            obs = method.__self__
            # notifies the signal emit
            if kw is None: # old style call, without name
                self.__notify_observer__(obs, method,
                                         self, arg)
            elif 'old_style_call' in kw: # old style call with name
                self.__notify_observer__(obs, method,
                                         self, prop_name, arg)
            else:
                # New style explicit notification.
                # notice that named arguments overwrite any
                # existing key:val in kw, which is precisely what
                # it is expected to happen
                info = NTInfo('signal',
                              kw,
                              model=self, prop_name=prop_name, arg=arg)
                self.__notify_observer__(obs, method,
                                         self, prop_name, info)
                pass
            pass
        return
    pass # end of class Model
# ----------------------------------------------------------------------
| 0.988281 | 1 |
setup.py | tww-software/py_gps_nmea | 0 | 88836 | <gh_stars>0
from setuptools import setup
# Distribution metadata for the pygpsnmea package (a GPS NMEA 0183 decoder).
setup(name='pygpsnmea',
      version='2021.2',
      description='a Python 3 GPS NMEA 0183 decoder',
      author='<NAME>',
      url='https://github.com/tww-software/py_gps_nmea',
      license='MIT',
      # Sub-packages shipped with the distribution.
      packages=['pygpsnmea', 'pygpsnmea.sentences', 'pygpsnmea.gui'],
      # Runtime dependency for reading NMEA sentences from serial devices.
      install_requires=['pyserial'],
      include_package_data=True,
      zip_safe=False
      )
| 1.242188 | 1 |
mitm.py | theikkila/kube-mitm | 0 | 88964 | <gh_stars>0
import json, sys, os
from subprocess import run
import subprocess
import time
# BUG FIX: the original tested `len(sys.argv) < 3`, but sys.argv[3]
# (the protocol) is read below, so invoking the script with only two
# arguments crashed with IndexError instead of printing the usage text.
if len(sys.argv) < 4 or sys.argv[1] in ('--help', '-h', 'help'):
    print("Usage: mitm.py <ns> <service> <http|https>")
    sys.exit(0)

# Positional arguments: target namespace, service name, and the protocol
# used to reach the origin service.
namespace = sys.argv[1]
service = sys.argv[2]
svc_protocol = sys.argv[3]

# Fetch the current service definition as JSON so it can be cloned.
old_svc = run(["kubectl", "-n", namespace, "get", "svc", service, "-o", "json"], stdout=subprocess.PIPE)
k = json.loads(old_svc.stdout)

# Strip server-generated metadata so the object can be re-created.
# Uses pop(..., None) because some keys are optional: `annotations` may be
# absent, and `selfLink` is no longer populated on Kubernetes >= 1.20
# (a plain `del` raised KeyError there).
md = k['metadata']
md.pop('annotations', None)
md.pop('creationTimestamp', None)
md.pop('resourceVersion', None)
md.pop('selfLink', None)
md.pop('uid', None)

# Set name for the proxy svc
orig_service_name = md['name'] + '-origin'
md['name'] = orig_service_name

spec = k['spec']
# clusterIP is immutable and server-assigned; drop it before re-creating.
del spec['clusterIP']

# Select all ports
ports = [str(p['port']) for p in spec['ports']]
# Container ports for the mitmproxy pod mirror the service's ports.
pod_ports = []
for p in spec['ports']:
    pod_ports.append({
        "name": p['name'],
        "protocol": p['protocol'],
        "containerPort": p['port'],
    })

# Run pod and redirect to the port
del k['status']

print("Creating origin service to point where the service used to point...")
srv = run(["kubectl", "create", "-f", "-"], input=json.dumps(k).encode('utf8'))
def get_mitmproxy_pod_status(namespace, service):
    """Print the kubectl listing of the mitmproxy pod fronting *service*."""
    separator = "=========================="
    print(separator)
    run(["kubectl", "get", "pods", "-n", namespace, "-l", "proxy=true,papp="+service])
    print(separator)
# Pod manifest for the mitmproxy container: it listens on the service's
# ports and reverse-proxies traffic to the "-origin" copy of the service.
mitmproxy_podname = "mitmproxy-"+service
mitmproxy_pod = {
    "apiVersion": "v1",
    "kind": "Pod",
    "metadata": {
        "labels": {
            "papp": service,
            "proxy": "true"
        },
        "name": mitmproxy_podname,
        "namespace": namespace
    },
    "spec": {
        "containers": [
            {
                "env": [
                    {
                        "name": "PORTS",
                        "value": str(" ".join(ports))
                    },
                    {
                        "name": "SERVICE",
                        "value": "{}://{}".format(svc_protocol, orig_service_name)
                    }
                ],
                "image": "theikkila/mitmp",
                "imagePullPolicy": "Always",
                "name": "mitmproxy",
                "ports": pod_ports
            }
        ],
        "restartPolicy": "Always"
    }
}
print("Spawning mitmproxy and reverse proxying origin service...")
run(["kubectl", "create", "-f", "-"], input=json.dumps(mitmproxy_pod).encode('utf8'))
print("Mitmproxy should be soon running, waiting 10s before automatic connect with following command:")
# Poll the pod status a few times while it starts up.
get_mitmproxy_pod_status(namespace, service)
time.sleep(5)
get_mitmproxy_pod_status(namespace, service)
time.sleep(5)
get_mitmproxy_pod_status(namespace, service)
# Map each service port to a local forward port starting at 45455.
mitm_ports = list(str(p) for p in range(45455, 45455+len(ports)))
port_mapping = {}
for m_port, l_port in zip(mitm_ports, ports):
    port_mapping[l_port] = m_port
cmd = "kubectl port-forward -n {} {} {}".format(namespace, mitmproxy_podname, " ".join(mitm_ports))
# print(cmd)
# Background port-forward to the mitmproxy pod; killed at the end.
proxy = subprocess.Popen(cmd, shell=True)
print("Please note, if pod isn't up you have to manually run the command in other console!")
# JSON-patch payloads: point the service's selector at the proxy pod, and
# the inverse patch to restore the original selector afterwards.
mitm_labels = {"op":"replace", "path":"/spec/selector", "value": {"proxy":"true", "papp":service}}
orig_labels = {"op":"replace", "path":"/spec/selector", "value": spec['selector']}
ok = input("Patch the original service by pressing ENTER")
cmd = ["kubectl", "patch", "-n", namespace, "svc", service, "--type=json", "--patch={}".format(json.dumps([mitm_labels]))]
print(cmd)
run(cmd)
print("\n\n\n")
for l_port, m_port in port_mapping.items():
    print("Patched service! Now you can debug by going to http://localhost:{} for port {}".format(m_port, l_port))
print("\n\n\n")
ok = input("When you are ready, return to normal by pressing ENTER")
# Restore the original selector and tear everything down.
cmd = ["kubectl", "patch", "-n", namespace, "svc", service, "--type=json", "--patch={}".format(json.dumps([orig_labels]))]
print(cmd)
run(cmd)
proxy.kill()
run(["kubectl", "delete", "-n", namespace, "pod", mitmproxy_podname])
run(["kubectl", "delete", "-n", namespace, "service", orig_service_name])
docker/python/manage.py | cn-cerc/summer-install | 4 | 89092 | <gh_stars>1-10
#!/usr/bin/python
import os
import sys
import time
# Locations of the built war files (master and develop branches) that are
# deployed into the tomcat containers below.
masterFile = 'b1:/d/webapp/workspace/vine-app-master/vine-app/target/vine-app-1.0.0.war'
developFile = 'b1:/d/webapp/workspace/vine-app-develop/vine-app/target/vine-app-1.0.0.war'
# Interactive menu shown when no choice is given on the command line.
menus = """************************* docker manage menus *************************
a1: docker ps all a2: docker stats a3: view cpu & memory
b1: docker stop all b2: docker start all b3: docker restart all
c1: reset memcached
d1: reset app[mast] d2: update app[mast] d3: check server app8101
e1: reset app[dev] e2: update app[dev] e3: check server app8201
h1: reset app task h2: reset nginx h3: vim manage.py(m)"""
class Tomcat:
    """Manages a group of dockerized tomcat application containers that all
    serve the same war file (e.g. app8101/app8102)."""
    def __init__(self, warfile, groups):
        # warfile: path of the war to deploy; groups: list of host ports,
        # one container (named app<port>) per port.
        self.warfile = warfile
        self.groups = groups;
        # Container memory limit passed to `docker run -m`.
        self.maxMem = '2048m';
    def reset(self):
        # Recreate every container in the group from scratch.
        for port in self.groups:
            self.resetApp(port)
    def update(self):
        # Redeploy the war into every container in the group.
        for port in self.groups:
            self.updateApp(port)
    def show(self):
        # Open an interactive shell inside the first container of the group.
        sh('docker exec -it app%s /bin/bash' % self.groups[0])
    def resetApp(self, port):
        # Destroy and recreate the container app<port>: remove the old
        # container and webapp directory, copy in the war, then run a fresh
        # tomcat container linked to memcached.
        name = 'app%s' % port
        subPath = '/d/webapp/%s' % name
        sh('docker stop %s && docker rm %s' % (name, name))
        sh('rm -rf %s && mkdir %s' % (subPath, subPath))
        sh('mkdir %s/webapps' % subPath)
        sh('cp %s %s/webapps/ROOT.war' % (self.warfile, subPath))
        sh("""docker run --name %s -p %s:8080 --restart=always -m %s \
    --link memcached:memcached_host \
    -v %s/webapps/:/opt/tomcat/webapps/ \
    -v /etc/timezone:/etc/timezone -v /etc/localtime:/etc/localtime \
    -d summer/tomcat""" % (name, port, self.maxMem, subPath))
        print('')
    def updateApp(self, port):
        # Redeploy the war into the running container app<port>: stop it,
        # replace ROOT.war, delete the exploded ROOT/ directory so tomcat
        # re-extracts the new war, then restart and wait for startup.
        app = 'app%s' % port
        sh('docker stop %s' % app)
        print('please wait 20s ...')
        time.sleep(10)
        sh('scp %s /d/webapp/%s/webapps/ROOT.war' % (self.warfile, app))
        sh('rm -rf /d/webapp/%s/webapps/ROOT/' % app)
        time.sleep(10)
        sh('docker start %s' % app)
        print('please wait 60s ...')
        time.sleep(60)
        print('finish')
        print('')
# Take the menu choice from the command line if given, otherwise show the
# menu and prompt interactively.
if len(sys.argv) == 2:
    choice = sys.argv[1]
else:
    print(menus)
    choice = input("please choice, other key to exit: ")
# Container groups for the master and develop deployments.
appMaster = Tomcat(masterFile, ['8101', '8102'])
appMaster.maxMem = '4096m'
appDevelop = Tomcat(developFile, ['8201', '8202'])
appDevelop.maxMem = '2048m'
# Dispatch the menu choice selected above to the matching action.
def showMenus():
    """Execute the docker-management action matching the global `choice`."""
    groups = ['8101', '8102']
    if choice == "a1":
        sh('docker ps -a')
    elif choice == 'a2':
        sh('docker stats')
    elif choice == 'a3':
        sh('top')
    elif choice == 'b1':
        sh('docker stop $(docker ps -q)')
    elif choice == 'b2':
        sh('docker start $(docker ps -a -q)')
    elif choice == 'b3':
        sh('docker restart $(docker ps -q)')
    elif choice == 'c1':
        # Recreate the shared memcached container.
        sh('docker stop memcached && docker rm memcached')
        sh('docker run -d --name memcached -p 11211:11211 --restart=always -m 200m memcached')
    elif choice == 'd1':
        appMaster.reset()
    elif choice == 'd2':
        appMaster.update()
    elif choice == 'd3':
        appMaster.show()
    elif choice == 'e1':
        appDevelop.reset()
    elif choice == 'e2':
        appDevelop.update()
    elif choice == 'e3':
        appDevelop.show()
    elif choice == 'h1':
        updateTask()
    elif choice == 'h2':
        # Regenerate the nginx config for the master group's virtual hosts
        # and reload nginx.
        nginx = NginxConfig()
        nginx.start()
        nginx.addServers('a1.knowall.cn', groups, 'a-group')
        nginx.addServers('b.knowall.cn', groups, 'b-group')
        nginx.addServers('r1.knowall.cn', groups, 'r1-group')
        nginx.save()
        sh('nginx -s reload')
    elif choice == 'h3':
        sh('vim /d/manage.py')
# Builds the nginx sever.conf used to front the tomcat containers.
class NginxConfig:
    """Generates an nginx configuration file with upstream groups and
    reverse-proxy server blocks."""

    def __init__(self):
        # Output path of the generated configuration file.
        self.conf = '/etc/nginx/conf.d/sever.conf'

    def start(self):
        """Begin a fresh config file, discarding any previous one."""
        if os.path.exists(self.conf):
            os.remove(self.conf)
        print("create nginx config: %s" % self.conf)
        self.fc = open(self.conf, 'w')
        self.fc.write('# create by NginxConfig.py')

    def save(self):
        """Flush and close the config file."""
        self.fc.close()

    def addServers(self, host, servers, group):
        """Emit an upstream block named *group* (one entry per port in
        *servers*) plus a load-balanced server block for *host*."""
        upstream_lines = ['\nupstream %s{\n' % group]
        upstream_lines.extend('server localhost:%s;\n' % member
                              for member in servers)
        upstream_lines.append("}\n")
        self.fc.write(''.join(upstream_lines))
        self.fc.write("""
server {
    listen 80;
    server_name %s;
    keepalive_timeout 90;
    location / {
        proxy_read_timeout 90;
        proxy_pass http://%s/;
    }
}""" % (host, group))

    def write(self, site, port, host = 'localhost'):
        """Emit a plain reverse-proxy server block for *site* -> host:port."""
        self.fc.write("""
server {
    listen 80;
    server_name %s;
    location / {
        proxy_pass http://%s:%s;
    }
}""" % (site, host, port))
def updateTask():
    """Rebuild the standalone appTask container from the latest vine-task
    war file, recreating its webapp directory from scratch."""
    war_source = 'b1:/d/webapp/workspace/vine-task/vine-task/target/vine-task-1.0.0.war'
    sh('docker stop appTask && docker rm appTask')
    sh('rm -rf /d/webapp/appTask && mkdir /d/webapp/appTask')
    sh('mkdir /d/webapp/appTask/webapps')
    sh('scp %s /d/webapp/appTask/webapps/ROOT.war' % war_source)
    sh("""docker run --name appTask -p 8401:8080 --restart=always -m 2048m \
    --link memcached:memcached_host \
    -v /d/webapp/appTask/webapps/:/opt/tomcat/webapps/ -d summer/tomcat""")
# Run a shell command line.
def sh(cmd):
    """Echo *cmd*, execute it in a shell and return its exit status.

    The original discarded os.system's return value, so callers could not
    detect failures; returning it is backward compatible because every
    existing caller ignores the result.
    """
    print(cmd)
    return os.system(cmd)
# Entry point: run the action selected by `choice`.
showMenus()
| 1.320313 | 1 |
M-SOLUTIONS/MS_B.py | consommee/AtCoder | 0 | 89220 | <reponame>consommee/AtCoder<gh_stars>0
a = int(input())
print((a-2)*180)c = input()
if c.count('x') <= 7:
print("YES")
else:
print("NO") | 2.390625 | 2 |
myuw/test/management/commands/test_memcache.py | uw-it-aca/myuw | 18 | 89348 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from django.core.management import call_command
class TestFlushMemcache(TestCase):
    """Smoke-test the `memcache` management command's flag variants."""
    def test_run(self):
        # The command must accept the short flag, the long flag, and no
        # flag at all without raising.
        call_command('memcache', '-f')
        call_command('memcache', '--flush')
        call_command('memcache')
| 1.1875 | 1 |
DQM/EcalMonitorTasks/python/ecalGpuTask_cfi.py | PKUfudawei/cmssw | 1 | 89476 | import FWCore.ParameterSet.Config as cms
# Digi time-sample indices 1..10 (one histogram is booked per sample).
digiSamples_ = list(range(1, 11))
# Out-of-time amplitude indices to monitor; per the plot descriptions below,
# the in-time bunch crossing sits at index 5, so 4 == BX-1 and 6 == BX+1.
uncalibOOTAmps_ = [4, 6]
# DQM monitor-element definitions for validating the GPU port of ECAL
# reconstruction against the CPU reference. For each product (digis,
# uncalibrated rec hits, rec hits) three families of histograms are booked:
# CPU quantities, GPU quantities (only when gpuOnlyPlots is enabled), and
# GPU-CPU differences. Fixes user-visible typos in titles/descriptions
# ("Indicies" -> "Indices", "(Gev)" -> "(GeV)", "differnece" -> "difference");
# all paths, binnings and ranges are unchanged.
ecalGpuTask = cms.untracked.PSet(
    params = cms.untracked.PSet(
        runGpuTask = cms.untracked.bool(False),
        # When True, book the GPU-only histograms in addition to the diffs.
        gpuOnlyPlots = cms.untracked.bool(True),
        uncalibOOTAmps = cms.untracked.vint32(uncalibOOTAmps_)
    ),
    MEs = cms.untracked.PSet(
        # CPU Digi
        DigiCpu = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT digi nDigis cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(5000),
                title = cms.untracked.string('Digis per Event')
            ),
            description = cms.untracked.string('Number of CPU Digis per Event')
        ),
        DigiCpuAmplitude = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT digi amplitude sample %(sample)s cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            multi = cms.untracked.PSet(
                sample = cms.untracked.vint32(digiSamples_)
            ),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(4096),
                title = cms.untracked.string('ADC Counts')
            ),
            description = cms.untracked.string('CPU digi amplitudes for individual digi samples (1-10)')
        ),
        # GPU Digi (optional)
        DigiGpu = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT digi nDigis gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(5000),
                title = cms.untracked.string('Digis per Event')
            ),
            description = cms.untracked.string('Number of GPU Digis per Event')
        ),
        DigiGpuAmplitude = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT digi amplitude sample %(sample)s gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            multi = cms.untracked.PSet(
                sample = cms.untracked.vint32(digiSamples_)
            ),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(4096),
                title = cms.untracked.string('ADC Counts')
            ),
            description = cms.untracked.string('GPU digi amplitudes for individual digi samples (1-10)')
        ),
        # Digi GPU-CPU Difference
        DigiGpuCpu = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT digi nDigis gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-500),
                high = cms.untracked.double(500),
                title = cms.untracked.string('GPU-CPU Digis per Event')
            ),
            description = cms.untracked.string('GPU-CPU difference of number of Digis per Event')
        ),
        DigiGpuCpuAmplitude = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT digi amplitude sample %(sample)s gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            multi = cms.untracked.PSet(
                sample = cms.untracked.vint32(digiSamples_)
            ),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-100),
                high = cms.untracked.double(100),
                title = cms.untracked.string('ADC Counts')
            ),
            description = cms.untracked.string('GPU-CPU difference of digi amplitude for individual digi samples (1-10)')
        ),
        # CPU UncalibRecHit
        UncalibCpu = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit nHits cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(5000),
                title = cms.untracked.string('Uncalibrated Rec Hits per Event')
            ),
            description = cms.untracked.string('Number of CPU Uncalibrated Rec Hits per Event')
        ),
        UncalibCpuAmp = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit amplitude cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(5000),
                title = cms.untracked.string('Amplitude')
            ),
            description = cms.untracked.string('CPU Uncalibrated Rec Hit reconstructed amplitude')
        ),
        UncalibCpuAmpError = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit amplitudeError cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(200),
                title = cms.untracked.string('Amplitude Error')
            ),
            description = cms.untracked.string('CPU Uncalibrated Rec Hit reconstructed amplitude uncertainty')
        ),
        UncalibCpuPedestal = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit pedestal cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(1000),
                title = cms.untracked.string('Pedestal')
            ),
            description = cms.untracked.string('CPU Uncalibrated Rec Hit reconstructed pedestal')
        ),
        UncalibCpuJitter = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit jitter cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-5),
                high = cms.untracked.double(5),
                title = cms.untracked.string('Jitter')
            ),
            description = cms.untracked.string('CPU Uncalibrated Rec Hit reconstructed time jitter')
        ),
        UncalibCpuJitterError = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit jitterError cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(25),
                low = cms.untracked.double(0),
                high = cms.untracked.double(0.25), # If you edit this, also change 10k bin in GpuTask.cc
                title = cms.untracked.string('Jitter Error')
            ),
            description = cms.untracked.string('CPU Uncalibrated Rec Hit reconstructed time jitter uncertainty. 10000 is special value, shown in last bin')
        ),
        UncalibCpuChi2 = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit chi2 cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(200),
                title = cms.untracked.string('Chi2')
            ),
            description = cms.untracked.string('CPU Uncalibrated Rec Hit chi2 of the pulse')
        ),
        UncalibCpuOOTAmp = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit OOT amplitude %(OOTAmp)s cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            multi = cms.untracked.PSet(
                OOTAmp = cms.untracked.vint32(uncalibOOTAmps_)
            ),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(500),
                title = cms.untracked.string('OOT Amplitude')
            ),
            description = cms.untracked.string('CPU Uncalibrated Rec Hit out-of-time reconstructed amplitude. Indices go from 0 to 9, with event BX at index 5. Index 4 == BX-1, index 6 == BX+1, etc.')
        ),
        UncalibCpuFlags = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit flags cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(64),
                low = cms.untracked.double(0),
                high = cms.untracked.double(64),
                title = cms.untracked.string('Flags')
            ),
            description = cms.untracked.string('CPU Uncalibrated Rec Hit flag to be propagated to RecHit')
        ),
        # GPU UncalibRecHit (optional)
        UncalibGpu = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit nHits gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(5000),
                title = cms.untracked.string('Uncalibrated Rec Hits per Event')
            ),
            description = cms.untracked.string('Number of GPU Uncalibrated Rec Hits per Event')
        ),
        UncalibGpuAmp = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit amplitude gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(5000),
                title = cms.untracked.string('Amplitude')
            ),
            description = cms.untracked.string('GPU Uncalibrated Rec Hit reconstructed amplitude')
        ),
        UncalibGpuAmpError = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit amplitudeError gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(200),
                title = cms.untracked.string('Amplitude Error')
            ),
            description = cms.untracked.string('GPU Uncalibrated Rec Hit reconstructed amplitude uncertainty')
        ),
        UncalibGpuPedestal = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit pedestal gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(1000),
                title = cms.untracked.string('Pedestal')
            ),
            description = cms.untracked.string('GPU Uncalibrated Rec Hit reconstructed pedestal')
        ),
        UncalibGpuJitter = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit jitter gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-5),
                high = cms.untracked.double(5),
                title = cms.untracked.string('Jitter')
            ),
            description = cms.untracked.string('GPU Uncalibrated Rec Hit reconstructed time jitter')
        ),
        UncalibGpuJitterError = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit jitterError gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(25),
                low = cms.untracked.double(0),
                high = cms.untracked.double(0.25), # If you edit this, also change 10k bin in GpuTask.cc
                title = cms.untracked.string('Jitter Error')
            ),
            description = cms.untracked.string('GPU Uncalibrated Rec Hit reconstructed time jitter uncertainty. 10000 is special value, shown in last bin')
        ),
        UncalibGpuChi2 = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit chi2 gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(200),
                title = cms.untracked.string('Chi2')
            ),
            description = cms.untracked.string('GPU Uncalibrated Rec Hit chi2 of the pulse')
        ),
        UncalibGpuOOTAmp = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit OOT amplitude %(OOTAmp)s gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            multi = cms.untracked.PSet(
                OOTAmp = cms.untracked.vint32(uncalibOOTAmps_)
            ),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(500),
                title = cms.untracked.string('OOT Amplitude')
            ),
            description = cms.untracked.string('GPU Uncalibrated Rec Hit out-of-time reconstructed amplitude. Indices go from 0 to 9, with event BX at index 5. Index 4 == BX-1, index 6 == BX+1, etc.')
        ),
        UncalibGpuFlags = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit flags gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(64),
                low = cms.untracked.double(0),
                high = cms.untracked.double(64),
                title = cms.untracked.string('Flags')
            ),
            description = cms.untracked.string('GPU Uncalibrated Rec Hit flag to be propagated to RecHit')
        ),
        # UncalibRecHit GPU-CPU Difference
        UncalibGpuCpu = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit nHits gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-500),
                high = cms.untracked.double(500),
                title = cms.untracked.string('GPU-CPU Uncalibrated Rec Hits per Event')
            ),
            description = cms.untracked.string('GPU-CPU difference of number of Uncalibrated Rec Hits per Event')
        ),
        UncalibGpuCpuAmp = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit amplitude gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-100),
                high = cms.untracked.double(100),
                title = cms.untracked.string('GPU-CPU Amplitude')
            ),
            description = cms.untracked.string('GPU-CPU difference of Uncalibrated Rec Hit reconstructed amplitude')
        ),
        UncalibGpuCpuAmpError = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit amplitudeError gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-50),
                high = cms.untracked.double(50),
                title = cms.untracked.string('GPU-CPU Amplitude Error')
            ),
            description = cms.untracked.string('GPU-CPU difference of Uncalibrated Rec Hit reconstructed amplitude uncertainty')
        ),
        UncalibGpuCpuPedestal = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit pedestal gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-50),
                high = cms.untracked.double(50),
                title = cms.untracked.string('GPU-CPU Pedestal')
            ),
            description = cms.untracked.string('GPU-CPU difference of Uncalibrated Rec Hit reconstructed pedestal')
        ),
        UncalibGpuCpuJitter = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit jitter gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-1),
                high = cms.untracked.double(1),
                title = cms.untracked.string('GPU-CPU Jitter')
            ),
            description = cms.untracked.string('GPU-CPU difference of Uncalibrated Rec Hit reconstructed time jitter')
        ),
        UncalibGpuCpuJitterError = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit jitterError gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-0.03),
                high = cms.untracked.double(0.03),
                title = cms.untracked.string('GPU-CPU Jitter Error')
            ),
            description = cms.untracked.string('GPU-CPU difference of Uncalibrated Rec Hit reconstructed time jitter uncertainty. 10000 is special value, shown in last bin')
        ),
        UncalibGpuCpuChi2 = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit chi2 gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-20),
                high = cms.untracked.double(20),
                title = cms.untracked.string('GPU-CPU Chi2')
            ),
            description = cms.untracked.string('GPU-CPU difference of Uncalibrated Rec Hit chi2 of the pulse')
        ),
        UncalibGpuCpuOOTAmp = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit OOT amplitude %(OOTAmp)s gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            multi = cms.untracked.PSet(
                OOTAmp = cms.untracked.vint32(uncalibOOTAmps_)
            ),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-50),
                high = cms.untracked.double(50),
                title = cms.untracked.string('GPU-CPU OOT Amplitude')
            ),
            description = cms.untracked.string('GPU-CPU difference of Uncalibrated Rec Hit out-of-time reconstructed amplitude. Indices go from 0 to 9, with event BX at index 5. Index 4 == BX-1, index 6 == BX+1, etc.')
        ),
        UncalibGpuCpuFlags = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT uncalib rec hit flags gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(128),
                low = cms.untracked.double(-64),
                high = cms.untracked.double(64),
                title = cms.untracked.string('GPU-CPU Flags')
            ),
            description = cms.untracked.string('GPU-CPU difference of Uncalibrated Rec Hit flag to be propagated to RecHit')
        ),
        # CPU RecHit
        RecHitCpu = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit nHits cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(5000),
                title = cms.untracked.string('Rec Hits per Event')
            ),
            description = cms.untracked.string('Number of CPU Rec Hits per Event')
        ),
        RecHitCpuEnergy = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit energy cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(5.0),
                title = cms.untracked.string('Energy (GeV)')
            ),
            description = cms.untracked.string('CPU Rec Hit Energy (GeV)')
        ),
        RecHitCpuTime = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit time cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-25.0),
                high = cms.untracked.double(25.0),
                title = cms.untracked.string('Time (ns)')
            ),
            description = cms.untracked.string('CPU Rec Hit Time')
        ),
        RecHitCpuFlags = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit flags cpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(1024),
                title = cms.untracked.string('Flags')
            ),
            description = cms.untracked.string('CPU Rec Hit Flags')
        ),
        # GPU RecHit (optional)
        RecHitGpu = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit nHits gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(5000),
                title = cms.untracked.string('Rec Hits per Event')
            ),
            description = cms.untracked.string('Number of GPU Rec Hits per Event')
        ),
        RecHitGpuEnergy = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit energy gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(5.0),
                title = cms.untracked.string('Energy (GeV)')
            ),
            description = cms.untracked.string('GPU Rec Hit Energy (GeV)')
        ),
        RecHitGpuTime = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit time gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-25.0),
                high = cms.untracked.double(25.0),
                title = cms.untracked.string('Time (ns)')
            ),
            description = cms.untracked.string('GPU Rec Hit Time')
        ),
        RecHitGpuFlags = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit flags gpu'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0),
                high = cms.untracked.double(1024),
                title = cms.untracked.string('Flags')
            ),
            description = cms.untracked.string('GPU Rec Hit Flags')
        ),
        # RecHit GPU-CPU Difference
        RecHitGpuCpu = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit nHits gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-500),
                high = cms.untracked.double(500),
                title = cms.untracked.string('GPU-CPU Rec Hits per Event')
            ),
            description = cms.untracked.string('GPU-CPU difference of number of total Rec Hits per Event')
        ),
        RecHitGpuCpuEnergy = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit energy gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-1.0),
                high = cms.untracked.double(1.0),
                title = cms.untracked.string('GPU-CPU Energy (GeV)')
            ),
            description = cms.untracked.string('GPU-CPU difference of Rec Hit Energy (GeV)')
        ),
        RecHitGpuCpuTime = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit time gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-2.5),
                high = cms.untracked.double(2.5),
                title = cms.untracked.string('GPU-CPU Time (ns)')
            ),
            description = cms.untracked.string('GPU-CPU difference of Rec Hit Time')
        ),
        RecHitGpuCpuFlags = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sGpuTask/%(prefix)sGT rec hit flags gpu-cpu diff'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('User'),
            xaxis = cms.untracked.PSet(
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-1024),
                high = cms.untracked.double(1024),
                title = cms.untracked.string('GPU-CPU Flags')
            ),
            description = cms.untracked.string('GPU-CPU difference of Rec Hit Flags')
        )
    )
)
| 1.085938 | 1 |