Datasets: Size: 10K - 100K rows

Column schema: repo_name (string, length 5-92) | path (string, length 4-232) | copies (string, 19 classes) | size (string, length 4-7) | content (string, length 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
pansapiens/mytardis | tardis/apps/mx_views/views.py | 3 | 2892 | from django.conf import settings
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import HttpResponse
from tardis.tardis_portal.auth import decorators as authz
from tardis.tardis_portal.models import Dataset
from tardis.tardis_portal.shortcuts import get_experiment_referer
from tardis.tardis_portal.shortcuts import render_response_index
@authz.dataset_access_required
def view_full_dataset(request, dataset_id):
"""Displays a MX Dataset and associated information.
Shows a full (hundreds of images) dataset its metadata and a list
of associated files with the option to show metadata of each file
and ways to download those files. With write permission this page
also allows uploading and metadata editing.
Settings for this view:
INSTALLED_APPS += ("tardis.apps.mx_views",)
DATASET_VIEWS = [("http://synchrotron.org.au/views/dataset/full",
"tardis.apps.mx_views.views.view_full_dataset"),]
"""
dataset = Dataset.objects.get(id=dataset_id)
def get_datafiles_page():
# pagination was removed by someone in the interface but not here.
# need to fix.
pgresults = 100
paginator = Paginator(dataset.datafile_set.all(), pgresults)
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
# If page request (9999) is out of range, deliver last page of results.
try:
return paginator.page(page)
except (EmptyPage, InvalidPage):
return paginator.page(paginator.num_pages)
display_images = dataset.get_images()
image_count = len(display_images)
if image_count > 4:
# take 4 evenly spaced images from the set
display_images = display_images[0::image_count / 4][:4]
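        # e.g. image_count == 10: step = 10 / 4 = 2 (integer division under
        # Python 2), giving indices 0, 2, 4, 6, 8, truncated to the first four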
upload_method = getattr(settings, "UPLOAD_METHOD", "uploadify")
c = {
'dataset': dataset,
'datafiles': get_datafiles_page(),
'parametersets': dataset.getParameterSets()
.exclude(schema__hidden=True),
'has_download_permissions':
authz.has_dataset_download_access(request, dataset_id),
'has_write_permissions':
authz.has_dataset_write(request, dataset_id),
'from_experiment': \
get_experiment_referer(request, dataset_id),
'other_experiments': \
authz.get_accessible_experiments_for_dataset(request, dataset_id),
'display_images': display_images,
'upload_method': upload_method,
'default_organization':
getattr(settings, 'DEFAULT_ARCHIVE_ORGANIZATION', 'classic'),
'default_format':
getattr(settings, 'DEFAULT_ARCHIVE_FORMATS', ['tgz', 'tar'])[0]
}
return HttpResponse(render_response_index(
request, 'mx_views/view_full_dataset.html', c))
| bsd-3-clause | -8,726,488,663,588,781,000 | 37.052632 | 79 | 0.65491 | false |
twidi/pytyrant | pytyrant.py | 1 | 14361 | """Pure python implementation of the binary Tokyo Tyrant 1.1.17 protocol
Tokyo Cabinet <http://tokyocabinet.sourceforge.net/> is a "super hyper ultra
database manager" written and maintained by Mikio Hirabayashi and released
under the LGPL.
Tokyo Tyrant is the de facto database server for Tokyo Cabinet written and
maintained by the same author. It supports a REST HTTP protocol, memcached,
and its own simple binary protocol. This library implements the full binary
protocol for the Tokyo Tyrant 1.1.17 in pure Python as defined here::
http://tokyocabinet.sourceforge.net/tyrantdoc/
Typical usage is with the PyTyrant class which provides a dict-like wrapper
for the raw Tyrant protocol::
>>> import pytyrant
>>> t = pytyrant.PyTyrant.open('127.0.0.1', 1978)
>>> t['__test_key__'] = 'foo'
>>> t.concat('__test_key__', 'bar')
>>> print t['__test_key__']
foobar
>>> del t['__test_key__']
"""
import math
import socket
import struct
import UserDict
__version__ = '1.1.17'
__all__ = [
'Tyrant', 'TyrantError', 'PyTyrant',
'RDBMONOULOG', 'RDBXOLCKREC', 'RDBXOLCKGLB',
]
class TyrantError(Exception):
pass
DEFAULT_PORT = 1978
MAGIC = 0xc8
RDBMONOULOG = 1 << 0
RDBXOLCKREC = 1 << 0
RDBXOLCKGLB = 1 << 1
class C(object):
"""
Tyrant Protocol constants
"""
put = 0x10
putkeep = 0x11
putcat = 0x12
putshl = 0x13
putnr = 0x18
out = 0x20
get = 0x30
mget = 0x31
vsiz = 0x38
iterinit = 0x50
iternext = 0x51
fwmkeys = 0x58
addint = 0x60
adddouble = 0x61
ext = 0x68
sync = 0x70
vanish = 0x71
copy = 0x72
restore = 0x73
setmst = 0x78
rnum = 0x80
size = 0x81
stat = 0x88
misc = 0x90
def _t0(code):
return [chr(MAGIC) + chr(code)]
def _t1(code, key):
return [
struct.pack('>BBI', MAGIC, code, len(key)),
key,
]
def _t1FN(code, func, opts, args):
outlst = [
struct.pack('>BBIII', MAGIC, code, len(func), opts, len(args)),
func,
]
for k in args:
outlst.extend([struct.pack('>I', len(k)), k])
return outlst
def _t1R(code, key, msec):
return [
struct.pack('>BBIQ', MAGIC, code, len(key), msec),
key,
]
def _t1M(code, key, count):
return [
struct.pack('>BBII', MAGIC, code, len(key), count),
key,
]
def _tN(code, klst):
outlst = [struct.pack('>BBI', MAGIC, code, len(klst))]
for k in klst:
outlst.extend([struct.pack('>I', len(k)), k])
return outlst
def _t2(code, key, value):
return [
struct.pack('>BBII', MAGIC, code, len(key), len(value)),
key,
value,
]
def _t2W(code, key, value, width):
return [
struct.pack('>BBIII', MAGIC, code, len(key), len(value), width),
key,
value,
]
def _t3F(code, func, opts, key, value):
return [
struct.pack('>BBIIII', MAGIC, code, len(func), opts, len(key), len(value)),
func,
key,
value,
]
def _tDouble(code, key, integ, fract):
return [
struct.pack('>BBIQQ', MAGIC, code, len(key), integ, fract),
key,
]
def socksend(sock, lst):
sock.sendall(''.join(lst))
def sockrecv(sock, bytes):
d = ''
while len(d) < bytes:
c = sock.recv(min(8192, bytes - len(d)))
if not c:
raise TyrantError('Connection closed')
d += c
return d
def socksuccess(sock):
fail_code = ord(sockrecv(sock, 1))
if fail_code:
raise TyrantError(fail_code)
def socklen(sock):
return struct.unpack('>I', sockrecv(sock, 4))[0]
def socklong(sock):
return struct.unpack('>Q', sockrecv(sock, 8))[0]
def sockstr(sock):
return sockrecv(sock, socklen(sock))
def sockdouble(sock):
intpart, fracpart = struct.unpack('>QQ', sockrecv(sock, 16))
return intpart + (fracpart * 1e-12)
def sockstrpair(sock):
klen = socklen(sock)
vlen = socklen(sock)
k = sockrecv(sock, klen)
v = sockrecv(sock, vlen)
return k, v
class PyTyrant(object, UserDict.DictMixin):
"""
Dict-like proxy for a Tyrant instance
"""
@classmethod
def open(cls, *args, **kw):
return cls(Tyrant.open(*args, **kw))
def __init__(self, t):
self.t = t
def __repr__(self):
# The __repr__ for UserDict.DictMixin isn't desirable
# for a large KV store :)
return object.__repr__(self)
def has_key(self, key):
return key in self
def __contains__(self, key):
try:
self.t.vsiz(key)
except TyrantError:
return False
else:
return True
def setdefault(self, key, value):
try:
self.t.putkeep(key, value)
except TyrantError:
return self[key]
return value
def __setitem__(self, key, value):
self.t.put(key, value)
def __getitem__(self, key):
try:
return self.t.get(key)
except TyrantError:
raise KeyError(key)
def __delitem__(self, key):
try:
self.t.out(key)
except TyrantError:
raise KeyError(key)
def __iter__(self):
return self.iterkeys()
def iterkeys(self):
self.t.iterinit()
try:
while True:
yield self.t.iternext()
except TyrantError:
pass
def keys(self):
return list(self.iterkeys())
def __len__(self):
return self.t.rnum()
def clear(self):
self.t.vanish()
def update(self, other=None, **kwargs):
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'):
self.multi_set(other.iteritems())
elif hasattr(other, 'keys'):
self.multi_set([(k, other[k]) for k in other.keys()])
else:
self.multi_set(other)
if kwargs:
self.update(kwargs)
def multi_del(self, keys, no_update_log=False):
opts = (no_update_log and RDBMONOULOG or 0)
if not isinstance(keys, (list, tuple)):
keys = list(keys)
self.t.misc("outlist", opts, keys)
def multi_get(self, keys, no_update_log=False):
opts = (no_update_log and RDBMONOULOG or 0)
if not isinstance(keys, (list, tuple)):
keys = list(keys)
rval = self.t.misc("getlist", opts, keys)
if len(rval) <= len(keys):
# 1.1.10 protocol, may return invalid results
if len(rval) < len(keys):
raise KeyError("Missing a result, unusable response in 1.1.10")
return rval
# 1.1.11 protocol returns interleaved key, value list
d = dict((rval[i], rval[i + 1]) for i in xrange(0, len(rval), 2))
return map(d.get, keys)
def multi_set(self, items, no_update_log=False):
opts = (no_update_log and RDBMONOULOG or 0)
lst = []
for k, v in items:
lst.extend((k, v))
self.t.misc("putlist", opts, lst)
def call_func(self, func, key, value, record_locking=False, global_locking=False):
opts = (
(record_locking and RDBXOLCKREC or 0) |
(global_locking and RDBXOLCKGLB or 0))
return self.t.ext(func, opts, key, value)
def get_size(self, key):
try:
return self.t.vsiz(key)
except TyrantError:
raise KeyError(key)
def get_stats(self):
return dict(l.split('\t', 1) for l in self.t.stat().splitlines() if l)
def prefix_keys(self, prefix, maxkeys=None):
if maxkeys is None:
maxkeys = len(self)
return self.t.fwmkeys(prefix, maxkeys)
def concat(self, key, value, width=None):
if width is None:
self.t.putcat(key, value)
else:
self.t.putshl(key, value, width)
def sync(self):
self.t.sync()
def close(self):
self.t.close()
class Tyrant(object):
@classmethod
def open(cls, host='127.0.0.1', port=DEFAULT_PORT, timeout=3.0):
sock = socket.socket()
sock.settimeout(timeout)
sock.connect((host, port))
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return cls(sock)
def __init__(self, sock):
self.sock = sock
def close(self):
self.sock.close()
def put(self, key, value):
"""Unconditionally set key to value
"""
socksend(self.sock, _t2(C.put, key, value))
socksuccess(self.sock)
def putkeep(self, key, value):
"""Set key to value if key does not already exist
"""
socksend(self.sock, _t2(C.putkeep, key, value))
socksuccess(self.sock)
def putcat(self, key, value):
"""Append value to the existing value for key, or set key to
value if it does not already exist
"""
socksend(self.sock, _t2(C.putcat, key, value))
socksuccess(self.sock)
def putshl(self, key, value, width):
"""Equivalent to::
self.putcat(key, value)
self.put(key, self.get(key)[-width:])
"""
socksend(self.sock, _t2W(C.putshl, key, value, width))
socksuccess(self.sock)
def putnr(self, key, value):
"""Set key to value without waiting for a server response
"""
socksend(self.sock, _t2(C.putnr, key, value))
def out(self, key):
"""Remove key from server
"""
socksend(self.sock, _t1(C.out, key))
socksuccess(self.sock)
def get(self, key):
"""Get the value of a key from the server
"""
socksend(self.sock, _t1(C.get, key))
socksuccess(self.sock)
return sockstr(self.sock)
def _mget(self, klst):
socksend(self.sock, _tN(C.mget, klst))
socksuccess(self.sock)
numrecs = socklen(self.sock)
for i in xrange(numrecs):
k, v = sockstrpair(self.sock)
yield k, v
def mget(self, klst):
"""Get key,value pairs from the server for the given list of keys
"""
return list(self._mget(klst))
def vsiz(self, key):
"""Get the size of a value for key
"""
socksend(self.sock, _t1(C.vsiz, key))
socksuccess(self.sock)
return socklen(self.sock)
def iterinit(self):
"""Begin iteration over all keys of the database
"""
socksend(self.sock, _t0(C.iterinit))
socksuccess(self.sock)
def iternext(self):
"""Get the next key after iterinit
"""
socksend(self.sock, _t0(C.iternext))
socksuccess(self.sock)
return sockstr(self.sock)
def _fwmkeys(self, prefix, maxkeys):
socksend(self.sock, _t1M(C.fwmkeys, prefix, maxkeys))
socksuccess(self.sock)
numkeys = socklen(self.sock)
for i in xrange(numkeys):
yield sockstr(self.sock)
def fwmkeys(self, prefix, maxkeys):
"""Get up to the first maxkeys starting with prefix
"""
return list(self._fwmkeys(prefix, maxkeys))
def addint(self, key, num):
socksend(self.sock, _t1M(C.addint, key, num))
socksuccess(self.sock)
return socklen(self.sock)
def adddouble(self, key, num):
fracpart, intpart = math.modf(num)
fracpart, intpart = int(fracpart * 1e12), int(intpart)
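        # The wire format carries the double as two unsigned 64-bit ints: the
        # integral part and the fractional part scaled by 1e12 (sockdouble
        # reverses this with fracpart * 1e-12).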
        socksend(self.sock, _tDouble(C.adddouble, key, intpart, fracpart))
socksuccess(self.sock)
return sockdouble(self.sock)
def ext(self, func, opts, key, value):
# tcrdbext opts are RDBXOLCKREC, RDBXOLCKGLB
"""Call func(key, value) with opts
opts is a bitflag that can be RDBXOLCKREC for record locking
and/or RDBXOLCKGLB for global locking"""
socksend(self.sock, _t3F(C.ext, func, opts, key, value))
socksuccess(self.sock)
return sockstr(self.sock)
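    # Illustrative call (assumes a server-side extension function named "incr"
    # was loaded at server start; hypothetical, not part of this library):
    #
    #   t.ext('incr', RDBXOLCKREC, 'counter', '1')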
def sync(self):
"""Synchronize the database
"""
socksend(self.sock, _t0(C.sync))
socksuccess(self.sock)
def vanish(self):
"""Remove all records
"""
socksend(self.sock, _t0(C.vanish))
socksuccess(self.sock)
def copy(self, path):
"""Hot-copy the database to path
"""
socksend(self.sock, _t1(C.copy, path))
socksuccess(self.sock)
def restore(self, path, msec):
"""Restore the database from path at timestamp (in msec)
"""
        socksend(self.sock, _t1R(C.restore, path, msec))
socksuccess(self.sock)
def setmst(self, host, port):
"""Set master to host:port
"""
socksend(self.sock, _t1M(C.setmst, host, port))
socksuccess(self.sock)
def rnum(self):
"""Get the number of records in the database
"""
socksend(self.sock, _t0(C.rnum))
socksuccess(self.sock)
return socklong(self.sock)
def size(self):
"""Get the size of the database
"""
socksend(self.sock, _t0(C.size))
socksuccess(self.sock)
return socklong(self.sock)
def stat(self):
"""Get some statistics about the database
"""
socksend(self.sock, _t0(C.stat))
socksuccess(self.sock)
return sockstr(self.sock)
def _misc(self, func, opts, args):
# tcrdbmisc opts are RDBMONOULOG
socksend(self.sock, _t1FN(C.misc, func, opts, args))
try:
socksuccess(self.sock)
finally:
numrecs = socklen(self.sock)
for i in xrange(numrecs):
yield sockstr(self.sock)
def misc(self, func, opts, args):
"""All databases support "putlist", "outlist", and "getlist".
"putlist" is to store records. It receives keys and values one after the other, and returns an empty list.
"outlist" is to remove records. It receives keys, and returns an empty list.
"getlist" is to retrieve records. It receives keys, and returns values.
Table database supports "setindex", "search", "genuid".
opts is a bitflag that can be RDBMONOULOG to prevent writing to the update log
"""
return list(self._misc(func, opts, args))
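    # Illustrative calls (argument layout per the docstring above; the getlist
    # return shape depends on the server version, see PyTyrant.multi_get):
    #
    #   t.misc('putlist', 0, ['k1', 'v1', 'k2', 'v2'])   # -> []
    #   t.misc('getlist', 0, ['k1', 'k2'])
    #   t.misc('outlist', RDBMONOULOG, ['k1'])           # skip the update log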
def main():
import doctest
doctest.testmod()
if __name__ == '__main__':
main()
| mit | -5,985,833,604,781,468,000 | 25.110909 | 114 | 0.572871 | false |
HonzaKral/curator | test_curator/integration/test_time_based.py | 1 | 1872 | from datetime import datetime, timedelta
import curator
from . import CuratorTestCase
class TestTimeBasedDeletion(CuratorTestCase):
def test_curator_will_properly_delete_indices(self):
self.create_indices(10)
self.run_curator(delete_older=3)
mtd = self.client.cluster.state(index=self.args['prefix'] + '*', metric='metadata')
self.assertEquals(4, len(mtd['metadata']['indices'].keys()))
def test_curator_will_properly_delete_hourly_indices(self):
self.create_indices(10, 'hours')
self.run_curator(delete_older=3, time_unit='hours')
mtd = self.client.cluster.state(index=self.args['prefix'] + '*', metric='metadata')
self.assertEquals(4, len(mtd['metadata']['indices'].keys()))
class TestFindExpiredIndices(CuratorTestCase):
def test_find_indices_ignores_indices_with_different_prefix_or_time_unit(self):
self.create_index('logstash-2012.01.01') # wrong precision
self.create_index('not-logstash-2012.01.01.00') # wrong prefix
self.create_index('logstash-2012.01.01.00')
expired = list(curator.find_expired_indices(self.client, 'hours', 1))
self.assertEquals(1, len(expired))
self.assertEquals('logstash-2012.01.01.00', expired[0][0])
def test_find_reports_correct_time_interval_from_cutoff(self):
self.create_index('l-2014.01.01')
self.create_index('l-2014.01.02')
        # yesterday is always safe since we reset to midnight and do <, not <=
self.create_index('l-2014.01.03')
expired = list(curator.find_expired_indices(self.client, 'days', 1,
utc_now=datetime(2014, 1, 4, 3, 45, 50), prefix='l-'))
self.assertEquals(
[
(u'l-2014.01.01', timedelta(2)),
(u'l-2014.01.02', timedelta(1))
],
expired
)
| apache-2.0 | -1,606,032,251,548,790,800 | 40.6 | 91 | 0.634615 | false |
chaubold/hytra | tests/core/test_conflictingsegmentations.py | 1 | 5839 | from __future__ import print_function, absolute_import, nested_scopes, generators, division, with_statement, unicode_literals
import logging
from hytra.core.ilastik_project_options import IlastikProjectOptions
from hytra.jst.conflictingsegmentsprobabilitygenerator import ConflictingSegmentsProbabilityGenerator
from hytra.core.ilastikhypothesesgraph import IlastikHypothesesGraph
from hytra.core.fieldofview import FieldOfView
try:
import multiHypoTracking_with_cplex as mht
except ImportError:
try:
import multiHypoTracking_with_gurobi as mht
except ImportError:
mht = None
import dpct
def constructFov(shape, t0, t1, scale=[1, 1, 1]):
[xshape, yshape, zshape] = shape
[xscale, yscale, zscale] = scale
fov = FieldOfView(t0, 0, 0, 0, t1, xscale * (xshape - 1), yscale * (yshape - 1),
zscale * (zshape - 1))
return fov
# def test_twoSegmentations():
# # set up ConflictingSegmentsProbabilityGenerator
# ilpOptions = IlastikProjectOptions()
# ilpOptions.divisionClassifierPath = None
# ilpOptions.divisionClassifierFilename = None
# ilpOptions.rawImageFilename = 'tests/multiSegmentationHypothesesTestDataset/Raw.h5'
# ilpOptions.rawImagePath = 'exported_data'
# ilpOptions.rawImageAxes = 'txyzc'
# ilpOptions.labelImageFilename = 'tests/multiSegmentationHypothesesTestDataset/segmentation.h5'
# ilpOptions.objectCountClassifierFilename = 'tests/multiSegmentationHypothesesTestDataset/tracking.ilp'
# additionalLabelImageFilenames = ['tests/multiSegmentationHypothesesTestDataset/segmentationAlt.h5']
# additionalLabelImagePaths = [ilpOptions.labelImagePath]
# probabilityGenerator = ConflictingSegmentsProbabilityGenerator(
# ilpOptions,
# additionalLabelImageFilenames,
# additionalLabelImagePaths,
# useMultiprocessing=False,
# verbose=False)
# probabilityGenerator.fillTraxels(usePgmlink=False)
# assert(len(probabilityGenerator.TraxelsPerFrame[0]) == 4)
# assert(len(probabilityGenerator.TraxelsPerFrame[1]) == 3)
# assert(len(probabilityGenerator.TraxelsPerFrame[2]) == 3)
# assert(len(probabilityGenerator.TraxelsPerFrame[3]) == 4)
# filenamesPerTraxel = [t.segmentationFilename for t in probabilityGenerator.TraxelsPerFrame[3].values()]
# idsPerTraxel = [t.idInSegmentation for t in probabilityGenerator.TraxelsPerFrame[3].values()]
# assert(idsPerTraxel.count(1) == 2)
# assert(idsPerTraxel.count(2) == 2)
# assert(filenamesPerTraxel.count('tests/multiSegmentationHypothesesTestDataset/segmentation.h5') == 2)
# assert(filenamesPerTraxel.count('tests/multiSegmentationHypothesesTestDataset/segmentationAlt.h5') == 2)
# # build hypotheses graph, check that conflicting traxels are properly detected
# fieldOfView = constructFov(probabilityGenerator.shape,
# probabilityGenerator.timeRange[0],
# probabilityGenerator.timeRange[1],
# [probabilityGenerator.x_scale,
# probabilityGenerator.y_scale,
# probabilityGenerator.z_scale])
# hypotheses_graph = IlastikHypothesesGraph(
# probabilityGenerator=probabilityGenerator,
# timeRange=probabilityGenerator.timeRange,
# maxNumObjects=1,
# numNearestNeighbors=2,
# fieldOfView=fieldOfView,
# withDivisions=False,
# divisionThreshold=0.1
# )
# assert(hypotheses_graph.countNodes() == 14)
# assert(hypotheses_graph.countArcs() == 23)
# assert(hypotheses_graph._graph.node[(0, 1)]['traxel'].conflictingTraxelIds == [3])
# assert(hypotheses_graph._graph.node[(0, 3)]['traxel'].conflictingTraxelIds == [1])
# assert(hypotheses_graph._graph.node[(0, 2)]['traxel'].conflictingTraxelIds == [4])
# assert(hypotheses_graph._graph.node[(0, 4)]['traxel'].conflictingTraxelIds == [2])
# assert(hypotheses_graph._graph.node[(1, 1)]['traxel'].conflictingTraxelIds == [2, 3])
# assert(hypotheses_graph._graph.node[(1, 2)]['traxel'].conflictingTraxelIds == [1])
# assert(hypotheses_graph._graph.node[(1, 3)]['traxel'].conflictingTraxelIds == [1])
# # track, but check that the right exclusion constraints are present
# hypotheses_graph.insertEnergies()
# trackingGraph = hypotheses_graph.toTrackingGraph()
# assert(len(trackingGraph.model['exclusions']) == 8)
# for exclusionSet in trackingGraph.model['exclusions']:
# assert(len(exclusionSet) == 2)
# # use multiHypoTracking, insert exclusion constraints!
# if mht is not None:
# result = mht.track(trackingGraph.model, {"weights": [10, 10, 500, 500]})
# else:
# return
# # standard dpct cannot handle exclusion constraints yet
# result = dpct.trackFlowBased(trackingGraph.model, {"weights": [10, 10, 500, 500]})
# hypotheses_graph.insertSolution(result)
# # hypotheses_graph.computeLineage()
# numActivePerFrame = {}
# for node in hypotheses_graph.nodeIterator():
# timeframe = node[0]
# if 'value' in hypotheses_graph._graph.node[node]:
# value = hypotheses_graph._graph.node[node]['value']
# else:
# value = 0
# numActivePerFrame.setdefault(timeframe, []).append(value)
# for _, v in numActivePerFrame.items():
# assert(sum(v) == 2)
# edgeFlow = 0
# for edge in hypotheses_graph.arcIterator():
# if 'value' in hypotheses_graph._graph.edge[edge[0]][edge[1]]:
# edgeFlow += hypotheses_graph._graph.edge[edge[0]][edge[1]]['value']
# assert(edgeFlow == 6)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
    # test_twoSegmentations()  # disabled: the test body above is commented out
| mit | -4,230,047,409,174,636,000 | 43.572519 | 125 | 0.679911 | false |
makelove/OpenCV-Python-Tutorial | ch21-轮廓Contours/21-findContour.py | 1 | 1096 | # -*- coding: utf-8 -*-
import numpy as np
import cv2
# im = cv2.imread('test.jpg')#
# im = cv2.imread('poker5hearts.jpg')#
# im = cv2.imread('../data/black-white-rect.png')#contour.jpg #
im = cv2.imread('../data/chessboard.jpeg')
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
cv2.imshow("imgray", imgray)
# Note: cv2.findContours() expects a binary image (black and white, not grayscale),
# so the loaded image must first be converted to grayscale and then thresholded to binary
# ret, thresh = cv2.threshold(imgray, 0, 25, 0)
# ret, thresh = cv2.threshold(imgray, 0, 100, 0)
ret, thresh = cv2.threshold(src=imgray, thresh=127, maxval=255, type=cv2.THRESH_BINARY)#src, thresh, maxval, type
cv2.imshow("thresh", thresh)
# Contour retrieval mode (Contour_Retrieval_Mode)
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
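# This uses the OpenCV 3.x API, where findContours returns (image, contours,
# hierarchy); each contour is a numpy array of points with shape (N, 1, 2)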
print("contours size: ", len(contours))
img = cv2.drawContours(im, contours, -1, (0,255,0), 3)
# img = cv2.drawContours(im, contours, 3, (255, 0, 0), 3)
cv2.namedWindow("contour.jpg", 0)
cv2.imshow("contour.jpg", img)
cv2.waitKey(0)
| mit | -2,430,169,206,289,489,000 | 31.8 | 113 | 0.703252 | false |
llllllllll/codetransformer | codetransformer/tests/test_code.py | 1 | 5761 | from dis import dis
from io import StringIO
from itertools import product, chain
import random
import sys
import pytest
from codetransformer.code import Code, Flag, pycode
from codetransformer.instructions import LOAD_CONST, LOAD_FAST, uses_free
@pytest.fixture(scope='module')
def sample_flags(request):
random.seed(8025816322119661921) # ayy lmao
nflags = len(Flag.__members__)
return tuple(
dict(zip(Flag.__members__.keys(), case)) for case in chain(
random.sample(list(product((True, False), repeat=nflags)), 1000),
[[True] * nflags],
[[False] * nflags],
)
)
def test_lnotab_roundtrip():
# DO NOT ADD EXTRA LINES HERE
def f(): # pragma: no cover
a = 1
b = 2
c = 3
d = 4
a, b, c, d
start_line = test_lnotab_roundtrip.__code__.co_firstlineno + 3
lines = [start_line + n for n in range(5)]
code = Code.from_pycode(f.__code__)
lnotab = code.lnotab
assert lnotab.keys() == set(lines)
assert isinstance(lnotab[lines[0]], LOAD_CONST)
assert lnotab[lines[0]].arg == 1
assert isinstance(lnotab[lines[1]], LOAD_CONST)
assert lnotab[lines[1]].arg == 2
assert isinstance(lnotab[lines[2]], LOAD_CONST)
assert lnotab[lines[2]].arg == 3
assert isinstance(lnotab[lines[3]], LOAD_CONST)
assert lnotab[lines[3]].arg == 4
assert isinstance(lnotab[lines[4]], LOAD_FAST)
assert lnotab[lines[4]].arg == 'a'
assert f.__code__.co_lnotab == code.py_lnotab == code.to_pycode().co_lnotab
def test_lnotab_really_dumb_whitespace():
ns = {}
exec('def f():\n lol = True' + '\n' * 1024 + ' wut = True', ns)
f = ns['f']
code = Code.from_pycode(f.__code__)
lines = [2, 1026]
lnotab = code.lnotab
assert lnotab.keys() == set(lines)
assert isinstance(lnotab[lines[0]], LOAD_CONST)
assert lnotab[lines[0]].arg
assert isinstance(lnotab[lines[1]], LOAD_CONST)
assert lnotab[lines[1]].arg
assert f.__code__.co_lnotab == code.py_lnotab == code.to_pycode().co_lnotab
def test_flag_packing(sample_flags):
for flags in sample_flags:
assert Flag.unpack(Flag.pack(**flags)) == flags
def test_flag_unpack_too_big():
assert all(Flag.unpack(Flag.max).values())
with pytest.raises(ValueError):
Flag.unpack(Flag.max + 1)
def test_flag_max():
assert Flag.pack(
CO_OPTIMIZED=True,
CO_NEWLOCALS=True,
CO_VARARGS=True,
CO_VARKEYWORDS=True,
CO_NESTED=True,
CO_GENERATOR=True,
CO_NOFREE=True,
CO_COROUTINE=True,
CO_ITERABLE_COROUTINE=True,
CO_FUTURE_DIVISION=True,
CO_FUTURE_ABSOLUTE_IMPORT=True,
CO_FUTURE_WITH_STATEMENT=True,
CO_FUTURE_PRINT_FUNCTION=True,
CO_FUTURE_UNICODE_LITERALS=True,
CO_FUTURE_BARRY_AS_BDFL=True,
CO_FUTURE_GENERATOR_STOP=True,
) == Flag.max
def test_flag_max_immutable():
with pytest.raises(AttributeError):
Flag.CO_OPTIMIZED.max = None
def test_code_multiple_varargs():
with pytest.raises(ValueError) as e:
Code(
(), (
'*args',
'*other',
),
)
assert str(e.value) == 'cannot specify *args more than once'
def test_code_multiple_kwargs():
with pytest.raises(ValueError) as e:
Code(
(), (
'**kwargs',
'**kwargs',
),
)
assert str(e.value) == 'cannot specify **kwargs more than once'
@pytest.mark.parametrize('cls', uses_free)
def test_dangling_var(cls):
instr = cls('dangling')
with pytest.raises(ValueError) as e:
Code((instr,))
assert (
str(e.value) ==
"Argument to %r is not in cellvars or freevars." % instr
)
def test_code_flags(sample_flags):
attr_map = {
'CO_NESTED': 'is_nested',
'CO_GENERATOR': 'is_generator',
'CO_COROUTINE': 'is_coroutine',
'CO_ITERABLE_COROUTINE': 'is_iterable_coroutine',
'CO_NEWLOCALS': 'constructs_new_locals',
}
for flags in sample_flags:
if sys.version_info < (3, 6):
codestring = b'd\x00\x00S' # return None
else:
codestring = b'd\x00S' # return None
code = Code.from_pycode(pycode(
argcount=0,
kwonlyargcount=0,
nlocals=2,
stacksize=0,
flags=Flag.pack(**flags),
codestring=codestring,
constants=(None,),
names=(),
varnames=('a', 'b'),
filename='',
name='',
firstlineno=0,
lnotab=b'',
))
assert code.flags == flags
for flag, attr in attr_map.items():
if flags[flag]:
assert getattr(code, attr)
@pytest.fixture
def abc_code():
a = LOAD_CONST('a')
b = LOAD_CONST('b')
c = LOAD_CONST('c') # not in instrs
code = Code((a, b), argnames=())
return (a, b, c), code
def test_instr_index(abc_code):
(a, b, c), code = abc_code
assert code.index(a) == 0
assert code.index(b) == 1
with pytest.raises(ValueError):
code.index(c)
def test_code_contains(abc_code):
(a, b, c), code = abc_code
assert a in code
assert b in code
assert c not in code
def test_code_dis(capsys):
@Code.from_pyfunc
def code(): # pragma: no cover
a = 1
b = 2
return a, b
buf = StringIO()
dis(code.to_pycode(), file=buf)
expected = buf.getvalue()
code.dis()
out, err = capsys.readouterr()
assert not err
assert out == expected
buf = StringIO()
code.dis(file=buf)
assert buf.getvalue() == expected
| gpl-2.0 | -3,448,919,409,673,295,000 | 24.95045 | 79 | 0.571949 | false |
Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2015_01_01/models/_management_lock_client_enums.py | 1 | 1363 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class LockLevel(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The lock level of the management lock.
"""
NOT_SPECIFIED = "NotSpecified"
CAN_NOT_DELETE = "CanNotDelete"
READ_ONLY = "ReadOnly"
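# Illustrative lookups (not part of the generated code): attribute and
# name-based access are case-insensitive via the metaclass; value lookup is exact.
#
#   LockLevel['can_not_delete'] is LockLevel.CAN_NOT_DELETE   # True
#   LockLevel('ReadOnly') is LockLevel.READ_ONLY              # True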
| mit | 8,379,623,869,956,419,000 | 37.942857 | 94 | 0.599413 | false |
jvs/sourcer | tests/test_salesforce.py | 1 | 5119 | from sourcer import Grammar
# This is work in progress.
# See: https://help.salesforce.com/articleView?id=customize_functions.htm&type=5
g = Grammar(r'''
```
import ast
```
start = Expression
Expression = OperatorPrecedence(
Atom | "(" >> Expression << ")",
Postfix(ArgumentList | FieldAccess),
Prefix("-" | "+" | "!"),
RightAssoc("^"),
LeftAssoc("*" | "/"),
LeftAssoc("+" | "-" | "&"),
NonAssoc("<=" | "<" | ">=" | ">"),
NonAssoc("!=" | "<>" | "==" | "="),
LeftAssoc("&&"),
LeftAssoc("||"),
)
class ArgumentList {
arguments: "(" >> (Expression /? ",") << ")"
}
class FieldAccess {
field: "." >> Word
}
Atom = Global | Identifier | Rational | Integer | String
class Global {
name: "$" >> Word
}
class Identifier {
name: Word
}
# ASK: What is the real syntax for these things?
Word = /[_a-zA-Z][_a-zA-Z0-9]*/
Rational = /(\d+\.\d*)|(\d*\.\d+)/ |> `float`
Integer = /\d+/ |> `int`
StringLiteral = /("([^"\\]|\\.)*")/ | /('([^'\\]|\\.)*')/
# For now, just use ast module to evaluate string literals.
class String {
value: StringLiteral |> `ast.literal_eval`
}
ignore /\s+/
''', include_source=True)
aliases = {
'=': '==',
'<>': '!=',
}
constants = {
'NULL': None,
'TRUE': True,
'FALSE': False,
}
# Incomplete collection of evaluators.
evaluators = {
'*': lambda x, y: x * y if x is not None and y is not None else None,
'/': lambda x, y: x / y if x is not None and y is not None else None,
'+': lambda x, y: x + y if x is not None and y is not None else None,
'-': lambda x, y: x - y if x is not None and y is not None else None,
'==': lambda x, y: x == y,
'!=': lambda x, y: x != y,
'&&': lambda x, y: x and y,
'||': lambda x, y: x or y,
'>': lambda x, y: x > y if x is not None and y is not None else False,
'<': lambda x, y: x < y if x is not None and y is not None else False,
'>=': lambda x, y: x >= y if x is not None and y is not None else False,
'<=': lambda x, y: x <= y if x is not None and y is not None else False,
'AND': lambda *a: all(a),
'CONTAINS': lambda x, y: str(y) in str(x) if x is not None else True,
'IF': lambda x, y, z: y if x else z,
'ISBLANK': lambda x: x is None,
'LOG': lambda x: log10(x) if x is not None else None,
'MAX': lambda *a: max(*a),
'MIN': lambda *a: min(*a),
'MOD': lambda x, y: (x % y) if x is not None and y is not None else None,
'NOT': lambda x: not(x),
'OR': lambda *a: any(a),
'SQRT': lambda x: sqrt(x) if x is not None else None,
'TEXT': lambda x: str(x),
}
def evaluate(node, bindings):
# Look up identifiers.
if isinstance(node, g.Identifier):
if node.name in bindings:
return bindings[node.name]
name = node.name.upper()
return bindings.get(name, name)
# Look up fields.
if isinstance(node, g.Postfix) and isinstance(node.operator, g.FieldAccess):
obj, field = node.left, node.operator.field
if hasattr(obj, field):
return getattr(obj, field)
elif isinstance(obj, dict):
return obj.get(field)
else:
return node
# Evaluate function calls and operators.
if isinstance(node, g.Infix):
x, func, y = node.left, node.operator, node.right
args = (x, y)
elif isinstance(node, g.Postfix) and isinstance(node.operator, g.ArgumentList):
func, args = node.left, node.operator.arguments
else:
return node
# Check if we're using an alias.
func = aliases.get(func, func)
if func in evaluators:
return evaluators[func](*args)
else:
return node
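# Illustrative walk-through: run('MIN(2, 3)') parses to a Postfix node whose
# operator is an ArgumentList, so evaluate() dispatches to
# evaluators['MIN'](2, 3) == 2.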
def run(formula, bindings=None):
updated_bindings = dict(constants)
updated_bindings.update(bindings or {})
tree = g.parse(formula)
return g.transform(tree, lambda node: evaluate(node, updated_bindings))
def test_some_simple_formulas():
result = run('1 + 2 * 3')
assert result == 7
result = run('foo == bar && fiz == buz', bindings={
'foo': 1, 'bar': 1, 'fiz': 2, 'buz': 2,
})
assert result == True
result = run('foo == bar && fiz == buz', bindings={
'foo': 1, 'bar': 1, 'fiz': 2, 'buz': 3,
})
assert result == False
result = run('1 <= 2 && (false || true)')
assert result == True # Explicitly compare to True.
result = run('1 > 2 || (true && false)')
assert result == False # Explicitly compare to False.
result = run('foo != bar', bindings={'foo': 10, 'bar': 10})
assert not result
result = run('foo != bar', bindings={'foo': 1, 'bar': 2})
assert result
result = run('foo.bar', bindings={'foo': {'bar': 10}})
assert result == 10
result = run('foo.bar.baz', bindings={'foo': {'bar': {'baz': 100}}})
assert result == 100
result = run('MIN(20, 10, 30)')
assert result == 10
result = run('MIN(20, 10, 30) + MAX(11, 12, 13)')
assert result == 23
| mit | -7,452,110,781,552,088,000 | 27.126374 | 83 | 0.540535 | false |
lsaffre/timtools | timtools/sdoc/feeders.py | 1 | 1705 | ## Copyright 2003-2009 Luc Saffre
## This file is part of the TimTools project.
## TimTools is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## TimTools is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with TimTools; if not, see <http://www.gnu.org/licenses/>.
import re
def plain2xml(txt):
txt = txt.replace("&","&")
txt = txt.replace("<","<")
return txt
memocommands = (
( re.compile('\[url\s+(\S+)\s*(.*?)\]',re.DOTALL),
lambda m : '<b>'+m.group(2)+'</b> (<i>' + m.group(1)+ '</i>)'),
)
# urlfind =
# urlrepl = re.compile('<b>\2</b> (<u>\1</u>)')
# def urlrepl(m):
def memo2xml(txt):
txt = plain2xml(txt)
txt = txt.replace('[B]','<b>')
txt = txt.replace('[b]','</b>')
txt = txt.replace('[U]','<u>')
txt = txt.replace('[u]','</u>')
for find,repl in memocommands:
txt = re.sub(find,repl,txt)
return txt
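# Illustrative conversion (assumed memo markup, per the replacements above):
#
#   memo2xml(u'[B]bold[b] & [url http://example.com Example]')
#   -> u'<b>bold</b> &amp; <b>Example</b> (<i>http://example.com</i>)'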
def rst2xml(txt):
raise "doesn't work"
import docutils.parsers.rst
import docutils.utils
parser = docutils.parsers.rst.Parser()
doc = docutils.utils.new_document("feed")
parser.parse(txt, doc)
raise "and now?"
_feeders={
'xml' : lambda x : x,
'plain' : plain2xml,
'rst' : rst2xml,
'memo' : memo2xml,
}
def getFeeder(name):
return _feeders[name]
| bsd-2-clause | 5,341,080,861,915,417,000 | 27.416667 | 71 | 0.63871 | false |
souravbadami/zulip | zerver/views/home.py | 1 | 17065 | from __future__ import absolute_import
from typing import Any, List, Dict, Optional, Text
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpRequest
from django.shortcuts import redirect
from django.utils import translation
from django.utils.cache import patch_cache_control
from six.moves import zip_longest, zip, range
from zerver.decorator import zulip_login_required, process_client
from zerver.forms import ToSForm
from zerver.lib.realm_icon import realm_icon_url
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmAlias, \
RealmFilter, PreregistrationUser, UserActivity, \
UserPresence, get_recipient, name_changes_disabled, email_to_username, \
list_of_domains_for_realm
from zerver.lib.events import do_events_register
from zerver.lib.actions import update_user_presence, do_change_tos_version, \
do_update_pointer, get_cross_realm_dicts, realm_user_count
from zerver.lib.avatar import avatar_url
from zerver.lib.i18n import get_language_list, get_language_name, \
get_language_list_for_templates
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.lib.streams import access_stream_by_name
from zerver.lib.utils import statsd, get_subdomain
from zproject.backends import password_auth_enabled
from zproject.jinja2 import render_to_response
import calendar
import datetime
import logging
import os
import re
import simplejson
import time
@zulip_login_required
def accounts_accept_terms(request):
# type: (HttpRequest) -> HttpResponse
if request.method == "POST":
form = ToSForm(request.POST)
if form.is_valid():
do_change_tos_version(request.user, settings.TOS_VERSION)
return redirect(home)
else:
form = ToSForm()
email = request.user.email
special_message_template = None
if request.user.tos_version is None and settings.FIRST_TIME_TOS_TEMPLATE is not None:
special_message_template = 'zerver/' + settings.FIRST_TIME_TOS_TEMPLATE
return render_to_response(
'zerver/accounts_accept_terms.html',
{'form': form,
'email': email,
'special_message_template': special_message_template},
request=request)
def approximate_unread_count(user_profile):
# type: (UserProfile) -> int
not_in_home_view_recipients = [sub.recipient.id for sub in
Subscription.objects.filter(
user_profile=user_profile, in_home_view=False)]
# TODO: We may want to exclude muted messages from this count.
# It was attempted in the past, but the original attempt
# was broken. When we re-architect muting, we may
    # want to revisit this (see git issue #1019).
return UserMessage.objects.filter(
user_profile=user_profile, message_id__gt=user_profile.pointer).exclude(
message__recipient__type=Recipient.STREAM,
message__recipient__id__in=not_in_home_view_recipients).exclude(
flags=UserMessage.flags.read).count()
def sent_time_in_epoch_seconds(user_message):
# type: (UserMessage) -> Optional[float]
# user_message is a UserMessage object.
if not user_message:
return None
# We have USE_TZ = True, so our datetime objects are timezone-aware.
# Return the epoch seconds in UTC.
return calendar.timegm(user_message.message.pub_date.utctimetuple())
def home(request):
# type: (HttpRequest) -> HttpResponse
if settings.DEVELOPMENT and os.path.exists('var/handlebars-templates/compile.error'):
response = render_to_response('zerver/handlebars_compilation_failed.html',
request=request)
response.status_code = 500
return response
if not settings.SUBDOMAINS_HOMEPAGE:
return home_real(request)
# If settings.SUBDOMAINS_HOMEPAGE, sends the user the landing
# page, not the login form, on the root domain
subdomain = get_subdomain(request)
if subdomain != "":
return home_real(request)
return render_to_response('zerver/hello.html',
request=request)
@zulip_login_required
def home_real(request):
# type: (HttpRequest) -> HttpResponse
# We need to modify the session object every two weeks or it will expire.
# This line makes reloading the page a sufficient action to keep the
# session alive.
request.session.modified = True
user_profile = request.user
# If a user hasn't signed the current Terms of Service, send them there
if settings.TERMS_OF_SERVICE is not None and settings.TOS_VERSION is not None and \
int(settings.TOS_VERSION.split('.')[0]) > user_profile.major_tos_version():
return accounts_accept_terms(request)
narrow = [] # type: List[List[Text]]
narrow_stream = None
narrow_topic = request.GET.get("topic")
if request.GET.get("stream"):
try:
narrow_stream_name = request.GET.get("stream")
(narrow_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, narrow_stream_name)
narrow = [["stream", narrow_stream.name]]
except Exception:
logging.exception("Narrow parsing")
if narrow_stream is not None and narrow_topic is not None:
narrow.append(["topic", narrow_topic])
register_ret = do_events_register(user_profile, request.client,
apply_markdown=True, narrow=narrow)
user_has_messages = (register_ret['max_message_id'] != -1)
# Reset our don't-spam-users-with-email counter since the
# user has since logged in
if user_profile.last_reminder is not None:
user_profile.last_reminder = None
user_profile.save(update_fields=["last_reminder"])
# Brand new users get the tutorial
needs_tutorial = settings.TUTORIAL_ENABLED and \
user_profile.tutorial_status != UserProfile.TUTORIAL_FINISHED
first_in_realm = realm_user_count(user_profile.realm) == 1
# If you are the only person in the realm and you didn't invite
# anyone, we'll continue to encourage you to do so on the frontend.
prompt_for_invites = first_in_realm and \
not PreregistrationUser.objects.filter(referred_by=user_profile).count()
if user_profile.pointer == -1 and user_has_messages:
# Put the new user's pointer at the bottom
#
# This improves performance, because we limit backfilling of messages
# before the pointer. It's also likely that someone joining an
# organization is interested in recent messages more than the very
# first messages on the system.
register_ret['pointer'] = register_ret['max_message_id']
user_profile.last_pointer_updater = request.session.session_key
if user_profile.pointer == -1:
latest_read = None
else:
try:
latest_read = UserMessage.objects.get(user_profile=user_profile,
message__id=user_profile.pointer)
except UserMessage.DoesNotExist:
# Don't completely fail if your saved pointer ID is invalid
logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
latest_read = None
desktop_notifications_enabled = user_profile.enable_desktop_notifications
if narrow_stream is not None:
desktop_notifications_enabled = False
if user_profile.realm.notifications_stream:
notifications_stream = user_profile.realm.notifications_stream.name
else:
notifications_stream = ""
# Set default language and make it persist
default_language = register_ret['default_language']
url_lang = '/{}'.format(request.LANGUAGE_CODE)
if not request.path.startswith(url_lang):
translation.activate(default_language)
request.session[translation.LANGUAGE_SESSION_KEY] = default_language
# Pass parameters to the client-side JavaScript code.
# These end up in a global JavaScript Object named 'page_params'.
page_params = dict(
# Server settings.
share_the_love = settings.SHARE_THE_LOVE,
development_environment = settings.DEVELOPMENT,
debug_mode = settings.DEBUG,
test_suite = settings.TEST_SUITE,
poll_timeout = settings.POLL_TIMEOUT,
login_page = settings.HOME_NOT_LOGGED_IN,
server_uri = settings.SERVER_URI,
maxfilesize = settings.MAX_FILE_UPLOAD_SIZE,
max_avatar_file_size = settings.MAX_AVATAR_FILE_SIZE,
server_generation = settings.SERVER_GENERATION,
use_websockets = settings.USE_WEBSOCKETS,
save_stacktraces = settings.SAVE_FRONTEND_STACKTRACES,
# realm data.
# TODO: Move all of these data to register_ret and pull from there
realm_uri = user_profile.realm.uri,
password_auth_enabled = password_auth_enabled(user_profile.realm),
domains = list_of_domains_for_realm(user_profile.realm),
name_changes_disabled = name_changes_disabled(user_profile.realm),
mandatory_topics = user_profile.realm.mandatory_topics,
show_digest_email = user_profile.realm.show_digest_email,
realm_presence_disabled = user_profile.realm.presence_disabled,
is_zephyr_mirror_realm = user_profile.realm.is_zephyr_mirror_realm,
# user_profile data.
# TODO: Move all of these data to register_ret and pull from there
fullname = user_profile.full_name,
email = user_profile.email,
enter_sends = user_profile.enter_sends,
user_id = user_profile.id,
is_admin = user_profile.is_realm_admin,
can_create_streams = user_profile.can_create_streams(),
autoscroll_forever = user_profile.autoscroll_forever,
default_desktop_notifications = user_profile.default_desktop_notifications,
avatar_url = avatar_url(user_profile),
avatar_url_medium = avatar_url(user_profile, medium=True),
avatar_source = user_profile.avatar_source,
timezone = user_profile.timezone,
# Stream message notification settings:
stream_desktop_notifications_enabled = user_profile.enable_stream_desktop_notifications,
stream_sounds_enabled = user_profile.enable_stream_sounds,
# Private message and @-mention notification settings:
desktop_notifications_enabled = desktop_notifications_enabled,
sounds_enabled = user_profile.enable_sounds,
enable_offline_email_notifications = user_profile.enable_offline_email_notifications,
pm_content_in_desktop_notifications = user_profile.pm_content_in_desktop_notifications,
enable_offline_push_notifications = user_profile.enable_offline_push_notifications,
enable_online_push_notifications = user_profile.enable_online_push_notifications,
enable_digest_emails = user_profile.enable_digest_emails,
# Realm foreign key data from register_ret.
# TODO: Rename these to match register_ret values.
subbed_info = register_ret['subscriptions'],
unsubbed_info = register_ret['unsubscribed'],
neversubbed_info = register_ret['never_subscribed'],
people_list = register_ret['realm_users'],
bot_list = register_ret['realm_bots'],
initial_pointer = register_ret['pointer'],
initial_presences = register_ret['presences'],
event_queue_id = register_ret['queue_id'],
# Misc. extra data.
have_initial_messages = user_has_messages,
initial_servertime = time.time(), # Used for calculating relative presence age
default_language_name = get_language_name(register_ret['default_language']),
language_list_dbl_col = get_language_list_for_templates(register_ret['default_language']),
language_list = get_language_list(),
needs_tutorial = needs_tutorial,
first_in_realm = first_in_realm,
prompt_for_invites = prompt_for_invites,
notifications_stream = notifications_stream,
cross_realm_bots = list(get_cross_realm_dicts()),
unread_count = approximate_unread_count(user_profile),
furthest_read_time = sent_time_in_epoch_seconds(latest_read),
has_mobile_devices = num_push_devices_for_user(user_profile) > 0,
)
# These fields will be automatically copied from register_ret into
# page_params. It is a goal to move more of the page_params list
# into this sort of cleaner structure.
page_params_core_fields = [
'alert_words',
'attachments',
'default_language',
'emoji_alt_code',
'last_event_id',
'left_side_userlist',
'max_icon_file_size',
'max_message_id',
'muted_topics',
'realm_add_emoji_by_admins_only',
'realm_allow_message_editing',
'realm_authentication_methods',
'realm_bot_domain',
'realm_create_stream_by_admins_only',
'realm_default_language',
'realm_default_streams',
'realm_email_changes_disabled',
'realm_emoji',
'realm_filters',
'realm_icon_source',
'realm_icon_url',
'realm_invite_by_admins_only',
'realm_invite_required',
'realm_message_content_edit_limit_seconds',
'realm_name',
'realm_name_changes_disabled',
'realm_restricted_to_domain',
'realm_waiting_period_threshold',
'referrals',
'twenty_four_hour_time',
'zulip_version',
]
for field_name in page_params_core_fields:
page_params[field_name] = register_ret[field_name]
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = get_recipient(Recipient.STREAM, narrow_stream.id)
try:
initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
except IndexError:
initial_pointer = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = initial_pointer
page_params["initial_pointer"] = initial_pointer
page_params["have_initial_messages"] = (initial_pointer != -1)
statsd.incr('views.home')
show_invites = True
# Some realms only allow admins to invite users
if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
show_invites = False
request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
response = render_to_response('zerver/index.html',
{'user_profile': user_profile,
'page_params': simplejson.encoder.JSONEncoderForHTML().encode(page_params),
'nofontface': is_buggy_ua(request.META.get("HTTP_USER_AGENT", "Unspecified")),
'avatar_url': avatar_url(user_profile),
'show_debug':
settings.DEBUG and ('show_debug' in request.GET),
'pipeline': settings.PIPELINE_ENABLED,
'show_invites': show_invites,
'is_admin': user_profile.is_realm_admin,
'show_webathena': user_profile.realm.webathena_enabled,
'enable_feedback': settings.ENABLE_FEEDBACK,
'embedded': narrow_stream is not None,
},
request=request)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
return response
@zulip_login_required
def desktop_home(request):
# type: (HttpRequest) -> HttpResponse
return HttpResponseRedirect(reverse('zerver.views.home.home'))
def is_buggy_ua(agent):
# type: (str) -> bool
"""Discrimiate CSS served to clients based on User Agent
Due to QTBUG-3467, @font-face is not supported in QtWebKit.
This may get fixed in the future, but for right now we can
just serve the more conservative CSS to all our desktop apps.
"""
return ("Humbug Desktop/" in agent or "Zulip Desktop/" in agent or "ZulipDesktop/" in agent) and \
"Mac" not in agent
| apache-2.0 | -2,616,524,014,721,912,000 | 44.75067 | 113 | 0.642309 | false |
ZeitOnline/zeit.newsletter | src/zeit/newsletter/browser/edit.py | 1 | 2579 | from zeit.cms.i18n import MessageFactory as _
from zope.cachedescriptors.property import Lazy as cachedproperty
import os.path
import zeit.cms.browser.view
import zeit.cms.content.interfaces
import zeit.cms.interfaces
import zeit.content.image.interfaces
import zeit.content.video.interfaces
import zeit.edit.browser.form
import zeit.edit.browser.landing
import zeit.edit.browser.view
import zeit.newsletter.interfaces
import zope.formlib.form
class LandingZoneBase(zeit.edit.browser.landing.LandingZone):
uniqueId = zeit.edit.browser.view.Form('uniqueId')
block_type = 'teaser'
def initialize_block(self):
content = zeit.cms.interfaces.ICMSContent(self.uniqueId)
self.block.reference = content
class GroupLandingZone(LandingZoneBase):
"""Handler to drop objects to the body's landing zone."""
order = 0
class TeaserLandingZone(LandingZoneBase):
"""Handler to drop objects after other objects."""
order = 'after-context'
class Teaser(zeit.cms.browser.view.Base):
@cachedproperty
def metadata(self):
return zeit.cms.content.interfaces.ICommonMetadata(
self.context.reference, None)
@cachedproperty
def image(self):
# XXX copy&paste&tweak of zeit.content.cp.browser.blocks.teaser.Display
content = self.context.reference
if content is None:
return
if zeit.content.video.interfaces.IVideoContent.providedBy(content):
return content.thumbnail
images = zeit.content.image.interfaces.IImages(content, None)
if images is None:
preview = zope.component.queryMultiAdapter(
(content, self.request), name='preview')
if preview:
return self.url(preview)
return
if not images.image:
return
group = images.image
for name in group:
basename, ext = os.path.splitext(name)
if basename.endswith('148x84'):
image = group[name]
return self.url(image, '@@raw')
class Advertisement(zeit.cms.browser.view.Base):
@cachedproperty
def image(self):
if not self.context.image:
return
return self.url(self.context.image, '@@raw')
class GroupTitle(zeit.edit.browser.form.InlineForm):
legend = None
prefix = 'group'
undo_description = _('edit group title')
form_fields = zope.formlib.form.FormFields(
zeit.newsletter.interfaces.IGroup).select('title')
class Empty(object):
def render(self):
return u''
| bsd-3-clause | -2,014,002,219,757,949,200 | 27.032609 | 79 | 0.67119 | false |
vadyur/script.media.aggregator | anidub.py | 1 | 12586 | # coding: utf-8
import log
from log import debug
from settings import Settings
from base import *
import feedparser, urllib2, re
from bs4 import BeautifulSoup
from nfowriter import *
from strmwriter import *
import requests, filesystem
###################################################################################################
class DescriptionParser(DescriptionParserBase):
#==============================================================================================
def get_content(self, url):
page = urllib2.urlopen(url)
return page
#==============================================================================================
def __init__(self, url):
Informer.__init__(self)
self._dict = dict()
self.content = self.get_content(url)
#html_doc = '<?xml version="1.0" encoding="UTF-8" ?>\n<html>' + content.encode('utf-8') + '\n</html>'
self.soup = BeautifulSoup(self.content, 'html.parser')
self.OK = self.parse()
#==============================================================================================
def get_tag(self, x):
return {
u'Год: ': u'year',
u'Жанр: ': u'genre',
u'Описание: ': u'plot',
u'Режиссер: ': u'director',
u'Продолжительность: ': u'runtime',
u'Страна: ': u'country',
}.get(x, u'')
#==============================================================================================
def clean(self, title):
try:
title = title.split(u' ТВ-')[0]
title = title.split(u' TV-')[0]
title = title.split(u' [')[0]
except:
pass
return title.strip()
#==============================================================================================
def get_title(self, full_title):
try:
found = re.search('^(.+?) /', full_title).group(1)
return self.clean(found)
except AttributeError:
return full_title
#==============================================================================================
def get_original_title(self, full_title):
try:
found = re.search('^.+? / (.+)', full_title).group(1)
return self.clean(found)
except AttributeError:
return full_title
#==============================================================================================
def parse_season_from_title(self, title):
try:
found = re.search(r"(\d) \[\d+\D+\d+\]", title)
if found:
try:
self._dict['season'] = int(found.group(1))
return
except:
pass
parts = title.split(u'ТВ-')
if len(parts) == 1:
parts = title.split(u'TV-')
if len(parts) > 1:
found = re.search('([0-9]+)', parts[1]).group(1)
self._dict['season'] = int(found)
except:
pass
#==============================================================================================
def get_episodes_num(self, full_title):
try:
found = re.search(' \[([0-9]+) ', full_title).group(1)
return int(found)
except AttributeError:
return 1
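	# Illustrative decomposition (hypothetical title in the site's format):
	#   u'Наруто / Naruto ТВ-2 [220 из 220]'
	#   get_title          -> u'Наруто'
	#   get_original_title -> u'Naruto'
	#   parse_season_from_title -> season 2; get_episodes_num -> 220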
def date_added_duration(self):
ul = self.soup.find('ul', class_='story_inf')
if ul:
for li in ul.find_all('li'):
txt = li.get_text()
parts = txt.split(':')
if len(parts) > 1 and parts[0] == u'Дата':
date, t = parts[1].split(',') # d u' 30-09-2012' unicode
from datetime import datetime, timedelta
day = timedelta(1)
yesterday = datetime.today() - day
#date = ' 30-09-2012'
if u'Сегодня' in date:
d = datetime.today()
elif u'Вчера' in date:
d = yesterday
else:
try:
d = datetime.strptime(date.strip(), '%d-%m-%Y')
except TypeError:
d = datetime.today()
dt = datetime.today() - d
return dt
#==============================================================================================
def parse(self):
tag = u''
self._dict['gold'] = False
self._dict['season'] = 1
for title in self.soup.select('#news-title'):
full_title = title.get_text()
debug(full_title)
self._dict['title'] = self.get_title(full_title)
self._dict['originaltitle'] = self.get_original_title(full_title)
self.parse_season_from_title(full_title)
self._dict['episodes'] = self.get_episodes_num(full_title)
for b in self.soup.select('div.xfinfodata b'):
try:
text = b.get_text()
tag = self.get_tag(text)
if tag != '':
span = b.find_next_sibling('span')
self._dict[tag] = span.get_text().strip()
except:
pass
for div in self.soup.select('div.story_c'):
try:
text = div.get_text()
text = text.split(u'Описание:')[1]
text = text.split(u'Эпизоды')[0]
text = text.split(u'Скриншоты')[0]
text = text.strip()
self._dict['plot'] = text
#debug('---')
#debug(text)
#debug('---')
except:
pass
for b in self.soup.select('div.story_h .rcol sup b'):
try:
text = b.get_text()
text = text.split(' ')[0]
self._dict['rating'] = float(text) * 2
debug('rating: ' + str(self._dict['rating']))
except:
pass
for img in self.soup.select('span.poster img'):
try:
self._dict['thumbnail'] = img['src'].strip()
debug(self._dict['thumbnail'])
except:
pass
fanart = []
for a in self.soup.select('ul.clr li a'):
try:
debug(a['href'])
fanart.append(a['href'].strip())
except:
pass
if len(fanart) != 0:
self._dict['fanart'] = fanart
# else:
# dt = self.date_added_duration()
# if dt and dt.days <= 14:
# return False
for img in self.soup.select('div.video_info a img'):
try:
self._dict['studio'] = img['alt'].strip()
debug(self._dict['studio'])
except:
pass
tags = []
for a in self.soup.select('a[href*="https://tr.anidub.com/tags/"]'):
tags.append(a.get_text().strip())
if len(tags) > 0:
self._dict['tag'] = tags
return True
###################################################################################################
def write_tvshow_nfo(parser, tvshow_api, tvshow_path):
try:
if write_tvshow_nfo.favorites:
parser.Dict().get('tag', []).append('favorites')
except:
pass
NFOWriter(parser, tvshow_api=tvshow_api).write_tvshow_nfo(tvshow_path)
return
###################################################################################################
def write_tvshow(content, path, settings):
with filesystem.save_make_chdir_context(path, 'Anidub.write_tvshow'):
d = feedparser.parse(content)
cnt = 0
settings.progress_dialog.update(0, 'anidub', path)
for item in d.entries:
write_tvshow_item(item, path, settings)
cnt += 1
settings.progress_dialog.update(cnt * 100 / len(d.entries), 'anidub', path)
def write_tvshow_item(item, path, settings, path_out=[]):
debug('-------------------------------------------------------------------------')
debug(item.link)
parser = DescriptionParser(item.link)
if parser.parsed():
title = parser.get_value('title')
debug(title)
originaltitle = parser.get_value('originaltitle')
debug(originaltitle)
season = parser.get_value('season')
from downloader import TorrentDownloader
TorrentDownloader(item.link, settings.torrents_path(), settings).download()
debug('Episodes: ' + str(parser.get_value('episodes')))
tvshow_path = make_fullpath(title, '')
tvshow_path = filesystem.join(path, tvshow_path)
debug(tvshow_path)
path_out.append(tvshow_path)
settings.update_paths.add(tvshow_path)
with filesystem.save_make_chdir_context(tvshow_path, 'Anidub.write_tvshow_item'):
tvshow_api = TVShowAPI.get_by(originaltitle, title)
write_tvshow_nfo(parser, tvshow_api, tvshow_path)
season_path = filesystem.join(tvshow_path, u'Season ' + unicode(season))
debug(season_path)
with filesystem.save_make_chdir_context(season_path, 'Anidub.write_tvshow_item_2'):
episodes = tvshow_api.episodes(season)
if len(episodes) < parser.get_value('episodes'):
for i in range(len(episodes) + 1, parser.get_value('episodes') + 1):
episodes.append({
'title': title,
'showtitle': title,
'short': 's%02de%02d' % (season, i),
'episode': i,
'season': season
})
for episode in episodes:
title = episode['title']
shortName = episode['short']
episodeNumber = episode['episode']
if episodeNumber <= parser.get_value('episodes'):
filename = str(episodeNumber) + '. ' + 'episode_' + shortName
debug(filename)
ep = tvshow_api.Episode(season, episodeNumber)
if ep:
episode = ep
STRMWriter(item.link).write(filename, season_path, episodeNumber=episodeNumber, settings=settings)
NFOWriter(parser, tvshow_api=tvshow_api).write_episode(episode, filename, season_path)
else:
skipped(item)
del parser
def get_session(settings):
s = requests.Session()
data = {"login_name": settings.anidub_login, "login_password": settings.anidub_password, "login": "submit"}
headers = {
'Host': 'tr.anidub.com',
'Origin': 'https://tr.anidub.com',
'Referer': 'https://tr.anidub.com/',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132'
}
login = s.post("https://tr.anidub.com/", data=data, headers=headers)
debug('Login status: %d' % login.status_code)
if 'login_name' in login.content:
debug('Login failed')
return s
def download_torrent(url, path, settings):
from base import save_hashes
save_hashes(path)
url = urllib2.unquote(url)
debug('download_torrent:' + url)
s = get_session(settings)
page = s.get(url)
#debug(page.text.encode('utf-8'))
soup = BeautifulSoup(page.text, 'html.parser')
try:
a = soup.select_one('#tv720 div.torrent_h a')
except TypeError:
a = None
try:
if a is None:
a = soup.select_one('div.torrent_h > a')
except TypeError:
a = None
if a is not None:
href = 'https://tr.anidub.com' + a['href']
debug(s.headers)
r = s.get(href, headers={'Referer': url})
debug(r.headers)
if 'Content-Type' in r.headers:
if not 'torrent' in r.headers['Content-Type']:
return False
try:
with filesystem.fopen(path, 'wb') as torr:
for chunk in r.iter_content(100000):
torr.write(chunk)
save_hashes(path)
return True
except:
pass
return False
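# Example usage (sketch; `settings` is the add-on Settings object and both
# the URL and the output path are placeholders):
#
#     ok = download_torrent('https://tr.anidub.com/anime_tv/some-title.html',
#                           '/tmp/some-title.torrent', settings)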
def write_pages(url, path, settings, params={}, filter_fn=None, dialog_title = None, path_out=[]):
s = get_session(settings)
if params:
page = s.post(url, data=params)
else:
page = s.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
page_no = 1
cnt = 0
class Item:
def __init__(self, link, title):
self.link = link
self.title = title
with filesystem.save_make_chdir_context(path, 'Anidub.write_pages'):
while True:
if params:
selector = soup.select('div.search_post > div.text > h2 > a')
else:
selector = soup.select('article.story > div.story_h > div.lcol > h2 > a')
if not selector:
break
settings.progress_dialog.update(0, dialog_title, path)
for a in selector:
log.debug(a['href'])
link = a['href']
title = a.get_text()
if filter_fn and filter_fn(title):
continue
write_tvshow_item(Item(link, title), path, settings, path_out)
cnt += 1
settings.progress_dialog.update(cnt * 100 / len(selector), dialog_title, path)
if not 'favorites' in url:
break
page_no += 1
page = s.get(url + 'page/%d/' % page_no)
if page.status_code == requests.codes.ok:
soup = BeautifulSoup(page.text, 'html.parser')
else:
break
return cnt
def write_favorites(path, settings):
write_pages('https://tr.anidub.com/favorites/', path, settings, dialog_title=u'Избранное AniDUB')
def search_generate(what, settings, path_out):
def filter(title):
if what not in title:
return True
return False
write_tvshow_nfo.favorites = False
return write_pages('https://tr.anidub.com/index.php?do=search',
settings.anime_tvshow_path(), settings,
{'do': 'search',
'subaction': 'search',
'story': what.encode('utf-8')}, filter,
dialog_title=u'Поиск AniDUB',
path_out=path_out)
###################################################################################################
def run(settings):
if settings.anime_save:
if settings.anidub_rss:
write_tvshow_nfo.favorites = False
write_tvshow(settings.anidub_url, settings.anime_tvshow_path(), settings)
if settings.anidub_favorite:
write_tvshow_nfo.favorites = True
write_favorites(settings.anime_tvshow_path(), settings)
if __name__ == '__main__':
settings = Settings('../media_library')
run(settings)
| gpl-3.0 | 2,000,693,373,387,839,200 | 26.610619 | 118 | 0.567228 | false |
robdobsn/AmazonEchoShopping | WaitroseService/WaitroseScraper.py | 1 | 20691 | # Waitrose web scraper
__author__ = 'robdobsn'
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.support.ui as webdriverui
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, WebDriverException, TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import logging
import json
import re
class WaitroseScraper():
def __init__(self):
logging.info("Waitrose scraper starting")
self.isInitalized = False
self.isLoggedIn = False
self.webDriverType = "PhantomJS"
self.execUsingJS = False
def clickButtonByClassName(self, className):
if self.execUsingJS:
self.webDriver.execute_script("document.getElementsByClassName('" + className + "')[0].click()")
else:
btn = self.webDriver.find_element_by_class_name(className)
btn.click()
def clickButtonByXPath(self, xpath):
if self.execUsingJS:
self.webDriver.execute_script("return document.evaluate('" + xpath + "', document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue.click()")
else:
btn = self.webDriver.find_element_by_xpath(xpath)
btn.click()
def clickButtonByCSSSelector(self, cssSelector):
btn = self.webDriver.find_element_by_css_selector(cssSelector)
btn.click()
def checkButtonEnabledByCSSSelector(self, cssSelector):
btn = self.webDriver.find_element_by_css_selector(cssSelector)
return btn.is_enabled() and btn.is_displayed()
def sendKeysToFieldById(self, elemId, strToSend, pressEnterAfter, clearFirst):
# if self.execUsingJS:
# self.webDriver.execute_script("document.getElementsByClassName('" + elemId + "').value = '" + strToSend)
# else:
print("Sending keys to elemId " + elemId + " keys = " + strToSend)
field = self.webDriver.find_element_by_id(elemId)
print(field)
if (clearFirst):
field.send_keys(Keys.CONTROL + "a")
field.send_keys(Keys.DELETE)
field.send_keys(strToSend + (Keys.RETURN if pressEnterAfter else ""))
def debugDumpPageSource(self, filenameExtra=""):
with open("debugPageSource" + filenameExtra + ".html", "w") as debugDumpFile:
debugDumpFile.write(self.webDriver.page_source)
self.webDriver.save_screenshot('debugPageImage.png')
# Start the web driver (runs the browser)
def startWebDriver(self):
# Clear current session file info
with open('browserSession.json', 'w') as outfile:
json.dump({}, outfile)
# Create WebDriver
if self.webDriverType == "Chrome":
try:
self.webDriver = webdriver.Chrome()
except WebDriverException:
logging.error("startWebDriver() Chrome Failed to start")
return False
elif self.webDriverType == "Firefox":
try:
self.webDriver = webdriver.Firefox()
except WebDriverException:
logging.error("startWebDriver() Firefox Failed to start")
return False
elif self.webDriverType == "PhantomJS":
try:
self.webDriver = webdriver.PhantomJS() # or add to your PATH
except:
try:
self.webDriver = webdriver.PhantomJS(
                        executable_path=r'C:\ProgramData\PhantomJS\bin')
except:
try:
self.webDriver = webdriver.PhantomJS(
executable_path='/usr/local/lib/node_modules/phantomjs/lib/phantom/bin/phantomjs')
except:
try:
self.webDriver = webdriver.PhantomJS(
executable_path=r'C:\Users\rob_2\AppData\Roaming\npm\node_modules\phantomjs\lib\phantom\bin\phantomjs.exe')
except:
logging.error("Failed to load the PhantomJS webdriver")
return False
# Set the window size (seems to be needed in phantomJS particularly
# This is probably because the website responds in mobile mode?
self.webDriver.set_window_size(1280,1024)
# Save session info
url = self.webDriver.command_executor._url
session_id = self.webDriver.session_id
with open('browserSession.json', 'w') as outfile:
json.dump({"url": url, "session_id": session_id}, outfile)
return True
def websiteLogin(self, username, password, attemptIdx):
try:
self.webDriver.save_screenshot('debug1_'+str(attemptIdx)+'.png')
logging.info("Waiting for signInRegister button")
wait = WebDriverWait(self.webDriver, 30)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "js-sign-in-register")))
logging.info("waitroseLogin() pressing signInRegister button")
self.clickButtonByClassName('js-sign-in-register')
self.webDriver.save_screenshot('debug2_'+str(attemptIdx)+'.png')
try:
print("Starting to wait for logon-email")
wait = WebDriverWait(self.webDriver, 30)
wait.until(EC.visibility_of_element_located((By.ID, "logon-email")))
print("Finished waiting for logon-email")
self.webDriver.save_screenshot('debug3_' + str(attemptIdx) + '.png')
try:
logging.info("waitroseLogin() entering username")
self.debugDumpPageSource("contbutton")
self.sendKeysToFieldById('logon-email', username, False, True)
self.webDriver.save_screenshot('debug4_' + str(attemptIdx) + '.png')
# self.clickButtonByXPath("//input[@type='button' and @value='Continue']")
if (self.checkButtonEnabledByCSSSelector("input[value='Continue'][type='button']")):
self.clickButtonByCSSSelector("input[value='Continue'][type='button']")
try:
logging.info("waitroseLogin() waiting for logon-password visible")
wait = WebDriverWait(self.webDriver, 60)
wait.until(EC.visibility_of_element_located((By.ID, "logon-password")))
self.webDriver.save_screenshot('debug5_' + str(attemptIdx) + '.png')
try:
logging.info("waitroseLogin() entering password")
self.sendKeysToFieldById('logon-password', password, False, True)
#self.clickButtonById('logon-button-sign-in')
self.clickButtonByCSSSelector("input[value='Sign in'][type='button']")
self.webDriver.save_screenshot('debug6_' + str(attemptIdx) + '.png')
logging.info("waitroseLogin() waiting for trolley-total to be visible")
wait = WebDriverWait(self.webDriver, 60)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "trolley-total")))
self.webDriver.save_screenshot('debug7_' + str(attemptIdx) + '.png')
elem2 = self.webDriver.find_element_by_class_name('trolley-total')
if elem2:
logging.info("waitroseLogin() basket found")
else:
logging.info("waitroseLogin() basket not found")
return True
except WebDriverException as err:
logging.error("waitroseLogin() Cannot find logon-password after wait " + err.msg)
self.debugDumpPageSource()
except WebDriverException as err:
logging.error("waitroseLogin() Cannot find logon-password field" + err.msg)
self.debugDumpPageSource()
except WebDriverException as err:
logging.error("waitroseLogin() Error entering logon-email" + err.msg)
self.debugDumpPageSource()
except WebDriverException as err:
logging.error("waitroseLogin() Cannot find logon-email field" + err.msg)
self.debugDumpPageSource()
except WebDriverException as err:
logging.error("waitroseLogin() Cannot find sign-in-register button" + err.msg)
self.debugDumpPageSource()
return False
def getBasketSummary(self):
basketSummary = {}
# Ensure we wait until the trolley-total is visible
try:
wait = WebDriverWait(self.webDriver, 20)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "trolley-total")))
except TimeoutException:
logging.error("Get basket summary timeout exception")
self.debugDumpPageSource()
return None
except WebDriverException:
logging.error("Get basket summary webdriver element exception")
self.debugDumpPageSource()
return None
# Get basket total price
try:
totalElem = self.webDriver.find_element_by_class_name('trolley-total')
if totalElem:
reTotalElem = re.search("([0-9]{1,4}\.[0-9]{2})", totalElem.text)
if reTotalElem:
basketSummary["totalPrice"] = reTotalElem.group(1)
logging.info("waitrose: Basket: total=" + str(basketSummary["totalPrice"]))
# Get number of basket items
summaryElem = self.webDriver.find_element_by_class_name('trolley-summary')
if summaryElem:
reSummaryElem = re.search("([0-9]{1,4}) items", summaryElem.text)
if reSummaryElem:
basketSummary["numItems"] = reSummaryElem.group(1)
logging.info("waitrose: Basket: num items=" + str(basketSummary["numItems"]))
except WebDriverException:
logging.error("waitrose: Get basket summary webdriver element exception")
self.debugDumpPageSource()
return None
# Return info found
return basketSummary
def getElemAttrIfPresent(self, soup, elemName, className, subElem, attrName, regexReplace, destDict=None, dictName=None):
rslt = ""
try:
el = soup.find(elemName, class_=className)
            if subElem != "":
el = el.find(subElem)
if attrName == "text":
rslt = el.get_text()
else:
rslt = el[attrName]
            if regexReplace != "":
rslt = re.sub(regexReplace, "", rslt)
if destDict is not None:
destDict[dictName] = rslt
except WebDriverException:
logging.error("waitrose: Error extracting element " + elemName + " " + className)
self.debugDumpPageSource()
except:
logging.error("waitrose: Error (not webdriver) extracting element " + elemName + " " + className)
self.debugDumpPageSource()
return rslt
def getShoppingItems(self, isTrolleyPage):
# Make sure all items on the page are loaded - lazy loader
try:
self.debugDumpPageSource("m-product")
webdriverui.WebDriverWait(self.webDriver, 10)\
.until(EC.visibility_of_element_located((By.CLASS_NAME, "m-product")))
except WebDriverException:
logging.error("Wait for m-product webdriver element exception")
return []
productsFound = self.webDriver.find_elements_by_class_name("m-product")
print("waitrose: Lazy loading products - currently " + str(len(productsFound)) + " found")
numRepeats = 0
if len(productsFound) > 10:
while True:
prevFound = len(productsFound)
self.webDriver.execute_script("window.scrollBy(0,window.innerHeight)")
productsFound = self.webDriver.find_elements_by_class_name("m-product")
print("Loading products - currently " + str(len(productsFound)) + " found")
if len(productsFound) <= prevFound:
numRepeats += 1
if numRepeats > 20:
break
else:
numRepeats = 0
print("Done lazy loading products " + str(len(productsFound)) + " found")
# Go through items in the list on the current page
shoppingItems = []
for product in productsFound:
# Get HTML for this product
basketIt = {}
el = product.get_attribute("innerHTML")
productSoup = BeautifulSoup(el, "html.parser")
# Extract some common details
self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "", "href", "", basketIt, "detailsHref")
self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "img", "src", "", basketIt, "imageSrc")
self.getElemAttrIfPresent(productSoup, "div", "m-product-volume", "", "text", r"\W", basketIt, "productVolume")
# Check if we are doing the trolley page - which has extra info like number of items ordered
if isTrolleyPage:
self.getElemAttrIfPresent(productSoup, "div", "m-product-title", "a", "text", "", basketIt, "productTitle")
if not "productTitle" in basketIt or basketIt["productTitle"] == "":
self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "img", "title", "", basketIt,
"productTitle")
self.getElemAttrIfPresent(productSoup, "div", "quantity-append", "input", "value", "", basketIt,
"trolleyQuantity")
self.getElemAttrIfPresent(productSoup, "p", "m-product-details", "span", "text", "", basketIt,
"trolleyPrice")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-price", "",
basketIt,
"price")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-priceperkg",
"", basketIt, "pricePerKg")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-orderitemid",
"", basketIt, "orderItemId")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-producttype",
"", basketIt, "productType")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-productid",
"", basketIt, "productId")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-uom", "", basketIt,
"UOM")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-weighttype",
"", basketIt, "weightType")
self.getElemAttrIfPresent(productSoup, "div", "m-product-details-container", "div", "data-substitute",
"", basketIt, "substitute")
else:
self.getElemAttrIfPresent(productSoup, "div", "m-product-price-container", "span", "text", "\W", basketIt,
"price")
self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "", "text", "", basketIt,
"productTitle")
if not "productTitle" in basketIt or basketIt["productTitle"] == "":
self.getElemAttrIfPresent(productSoup, "a", "m-product-open-details", "img", "title", "", basketIt,
"productTitle")
# Check if the product at least has a title and only add to list if it does
if not "productTitle" in basketIt or basketIt["productTitle"] == "":
logging.error("Extract Shopping List: Failed to extract product name")
else:
shoppingItems.append(basketIt)
return shoppingItems
def getTrolleyContents(self):
# Ensure we wait until the trolley-total is visible
try:
wait = WebDriverWait(self.webDriver, 20)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "trolley-total")))
except WebDriverException:
logging.error("Wait for Trolley-Total webdriver element exception")
self.debugDumpPageSource()
return None
# Navigate to the basket contents
try:
self.clickButtonByXPath('//div[@class="mini-trolley"]//a')
wait = WebDriverWait(self.webDriver, 30)
wait.until(EC.visibility_of_element_located((By.ID, "my-trolley")))
except NoSuchElementException:
logging.error("Press view trolley button no such element")
self.debugDumpPageSource()
return None
except WebDriverException:
logging.error("Press view trolley button webdriver element exception")
self.debugDumpPageSource()
return None
# Get the shopping items on the current page
return self.getShoppingItems(True)
def getFavourites(self):
# Ensure we wait until the favourites is visible
try:
wait = WebDriverWait(self.webDriver, 20)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "js-navbar-favourites")))
except WebDriverException:
logging.error("Wait for favourites button webdriver element exception")
self.debugDumpPageSource()
return None
# Navigate to the favourites
try:
FAVOURITES_BUTTON_XPATH = '//a[@class="js-navbar-favourites"]'
elemBasketBtn = self.webDriver.find_element_by_xpath(FAVOURITES_BUTTON_XPATH)
print(elemBasketBtn)
elemBasketBtn.click()
wait = WebDriverWait(self.webDriver, 60)
wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "products-grid")))
except NoSuchElementException:
logging.error("Press view favourites button no such element")
self.debugDumpPageSource()
return None
except WebDriverException:
logging.error("Press view favourites button webdriver element exception")
self.debugDumpPageSource()
return None
# Get the shopping items on the current page
return self.getShoppingItems(False)
# Handle site login
def siteLogin(self, siteUrl, username, password, titleMustContainStr):
# Start webDriver
if not self.startWebDriver():
logging.error("Unable to start webdriver")
return False
self.isInitalized = True
# Go to URL
logging.info("Webdriver going to " + siteUrl)
self.webDriver.get(siteUrl)
logging.info("Webdriver site title = " + self.webDriver.title)
if not titleMustContainStr in self.webDriver.title:
logging.error("Site " + siteUrl + " title doesn't contain " + titleMustContainStr)
self.debugDumpPageSource()
return False
# Handle login
self.isLoggedIn = self.websiteLogin(username, password, 1)
# Succeeded so far
return self.isLoggedIn
# Ensure that we are logged in
def ensureLoggedIn(self, username, password):
# Ensure we are initialised
if not self.isInitalized:
self.siteLogin("http://www.waitrose.com", username, password, "Waitrose")
# Try to login again if not currently logged in
if self.isInitalized:
if not self.isLoggedIn:
self.isLoggedIn = self.websiteLogin(username, password, 2)
return self.isLoggedIn
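# --- Illustrative sketches below; not part of the original module ---
def attachToSavedSession():
    """Reattach to the browser session saved by startWebDriver().
    Sketch only: assumes browserSession.json exists and that the WebDriver
    server recorded in it is still running. Note that Remote() opens a spare
    session of its own before we point it at the saved one.
    """
    with open('browserSession.json') as sessionFile:
        info = json.load(sessionFile)
    driver = webdriver.Remote(command_executor=info["url"], desired_capabilities={})
    driver.session_id = info["session_id"]
    return driver
if __name__ == '__main__':
    # Hypothetical usage; the credentials are placeholders
    logging.basicConfig(level=logging.INFO)
    scraper = WaitroseScraper()
    if scraper.siteLogin("http://www.waitrose.com", "user@example.org", "secret", "Waitrose"):
        print(scraper.getBasketSummary())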
| isc | -783,531,863,977,082,500 | 46.895833 | 169 | 0.581654 | false |
kidscancode/gamedev | pygame template.py | 1 | 1508 | # Pygame Template
# Use this to start a new Pygame project
# KidsCanCode 2015
import pygame
import random
# define some colors (R, G, B)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
BLACK = (0, 0, 0)
FUCHSIA = (255, 0, 255)
GRAY = (128, 128, 128)
LIME = (0, 128, 0)
MAROON = (128, 0, 0)
NAVYBLUE = (0, 0, 128)
OLIVE = (128, 128, 0)
PURPLE = (128, 0, 128)
RED = (255, 0, 0)
SILVER = (192, 192, 192)
TEAL = (0, 128, 128)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
CYAN = (0, 255, 255)
# basic constants to set up your game
WIDTH = 360
HEIGHT = 480
FPS = 30
BGCOLOR = BLACK
# initialize pygame
pygame.init()
# initialize sound - uncomment if you're using sound
# pygame.mixer.init()
# create the game window and set the title
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("My Game")
# start the clock
clock = pygame.time.Clock()
# set the 'running' variable to False to end the game
running = True
# start the game loop
while running:
# keep the loop running at the right speed
clock.tick(FPS)
# Game loop part 1: Events #####
for event in pygame.event.get():
# this one checks for the window being closed
if event.type == pygame.QUIT:
            running = False
# add any other events here (keys, mouse, etc.)
# Game loop part 2: Updates #####
# Game loop part 3: Draw #####
screen.fill(BGCOLOR)
# after drawing, flip the display
pygame.display.flip()
# close the window
pygame.quit()
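# Example (sketch) of filling in the Update and Draw sections above: a
# rectangle that moves right and wraps around. `x` would be initialised to 0
# before the loop; the specific shape and speed are arbitrary choices.
#
#     # Game loop part 2: Updates #####
#     x = (x + 5) % WIDTH
#     # Game loop part 3: Draw #####
#     screen.fill(BGCOLOR)
#     pygame.draw.rect(screen, GREEN, (x, HEIGHT // 2, 50, 50))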
| mit | 3,454,655,415,024,161,000 | 22.936508 | 55 | 0.63992 | false |
JacobFischer/Joueur.py | games/anarchy/forecast.py | 1 | 2074 | # Forecast: The weather effect that will be applied at the end of a turn, which causes fires to spread.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from games.anarchy.game_object import GameObject
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Forecast(GameObject):
"""The class representing the Forecast in the Anarchy game.
The weather effect that will be applied at the end of a turn, which causes fires to spread.
"""
def __init__(self):
"""Initializes a Forecast with basic logic as provided by the Creer code generator."""
GameObject.__init__(self)
# private attributes to hold the properties so they appear read only
self._controlling_player = None
self._direction = ""
self._intensity = 0
@property
def controlling_player(self):
"""The Player that can use WeatherStations to control this Forecast when its the nextForecast.
:rtype: games.anarchy.player.Player
"""
return self._controlling_player
@property
def direction(self):
"""The direction the wind will blow fires in. Can be 'north', 'east', 'south', or 'west'.
:rtype: str
"""
return self._direction
@property
def intensity(self):
"""How much of a Building's fire that can be blown in the direction of this Forecast. Fire is duplicated (copied), not moved (transfered).
:rtype: int
"""
return self._intensity
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
# <<-- /Creer-Merge: functions -->>
| mit | 66,524,175,843,730,740 | 37.407407 | 146 | 0.671167 | false |
hhucn/git-vote | git-vote/__main__.py | 1 | 3022 | import argparse
import collections
import re
import subprocess
NOTES_REF = 'refs/notes/votes'
Vote = collections.namedtuple('Vote', ['commit', 'user'])
def vote(args):
assert args.user, 'TODO: determine user automatically'
vote = 'vote:%s' % args.user
subprocess.check_call([
'git', 'notes', '--ref', NOTES_REF, 'append', '--allow-empty', '-m', vote, args.COMMIT],
cwd=args.repo_dir)
# TODO: prevent voting twice as same user
def get_all_votes(repo_dir):
output_bytes = subprocess.check_output([
'git', 'notes', '--ref', NOTES_REF, 'list'],
cwd=repo_dir)
output = output_bytes.decode('utf-8')
for line in output.splitlines():
if not line:
continue
votenote_ref, commit_id = line.split()
# TODO use dulwich or something more efficient here
votenote_bytes = subprocess.check_output(
['git', 'show', votenote_ref],
cwd=repo_dir)
votenote_content = votenote_bytes.decode('utf-8') # TODO ignore invalid votes
for voteline in votenote_content.splitlines():
if not voteline:
continue
m = re.match(r'^vote:(?P<user>[a-z0-9@._]+)$', voteline.strip()) # TODO check re for user spec
if not m:
print('Skipping crap %r' % voteline)
continue
user = m.group('user')
yield Vote(commit=commit_id, user=user)
def print_list(args):
all_votes = get_all_votes(args.repo_dir)
all_votes_sorted = sorted(all_votes, key=lambda v: (v.commit, v.user))
for v in all_votes_sorted:
print('%s: +1 from %s' % (v.commit, v.user))
def tally(all_votes):
""" Returns a dict commit id => set of users """
res = collections.defaultdict(set)
for v in all_votes:
res[v.commit].add(v.user)
return res
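# For example (illustrative): three votes over two commits
#
#     tally([Vote('abc', 'alice'), Vote('abc', 'bob'), Vote('def', 'alice')])
#
# returns {'abc': {'alice', 'bob'}, 'def': {'alice'}}.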
def print_tally(args):
all_votes = get_all_votes(args.repo_dir)
for commit, votes in sorted(tally(all_votes).items(), key=lambda kv: (kv[1], kv[0])):
print('%s: %d votes' % (commit, len(votes)))
def print_elect(args):
all_votes = get_all_votes(args.repo_dir)
winner_vcount, winner_commit = max((len(votes), commit) for commit, votes in tally(all_votes).items())
# TODO more algorithms
print('%s won the election with %d votes' % (winner_commit, winner_vcount))
def main():
parser = argparse.ArgumentParser('Vote on git commands')
parser.add_argument('-r', '--repo-dir', metavar='DIR', help='root directory of the repository to modify')
subparsers = parser.add_subparsers(dest='cmd')
vote_parser = subparsers.add_parser('vote', help='Vote for commit')
vote_parser.add_argument('--user', metavar='USER_ID', help='ID of the user to vote as')
vote_parser.add_argument('COMMIT', help='reference to the commit to vote for')
subparsers.add_parser('list', help='List all votes')
subparsers.add_parser('tally', help='Tally all votes')
subparsers.add_parser('elect', help='Elect a commit')
args = parser.parse_args()
if args.cmd == 'vote':
vote(args)
elif args.cmd == 'list':
print_list(args)
elif args.cmd == 'tally':
print_tally(args)
elif args.cmd == 'elect':
print_elect(args)
else:
parser.print_help()
if __name__ == '__main__':
main()
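# Command-line usage (sketch; the exact invocation depends on how the package
# is installed):
#
#     python __main__.py --repo-dir /path/to/repo vote --user alice <commit>
#     python __main__.py --repo-dir /path/to/repo tally
#     python __main__.py --repo-dir /path/to/repo elect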
| apache-2.0 | 2,600,720,082,252,724,000 | 28.627451 | 106 | 0.676704 | false |
DigitalCampus/django-nurhi-oppia | oppia/tests/av/test_permissions.py | 1 | 2347 | from django.urls import reverse
from django.test import TestCase
from oppia.tests.utils import *
class PermissionsViewTest(TestCase):
fixtures = ['user.json', 'oppia.json', 'quiz.json', 'permissions.json']
def setUp(self):
super(PermissionsViewTest, self).setUp()
self.login_url = reverse('profile_login')
self.admin_user = {
'user': 'admin',
'password': 'password'
}
self.staff_user = {
'user': 'staff',
'password': 'password'
}
self.normal_user = {
'user': 'demo',
'password': 'password'
}
self.teacher_user = {
'user': 'teacher',
'password': 'password'
}
def get_view(self, route, user=None):
if user is not None:
self.client.login(username=user['user'], password=user['password'])
return self.client.get(route)
def assert_response(self, view, status_code, user=None, view_kwargs=None):
route = reverse(view, kwargs=view_kwargs)
res = self.get_view(route, user)
self.assertEqual(res.status_code, status_code)
return res
def assert_can_view(self, view, user=None, view_kwargs=None):
return self.assert_response(view, 200, user, view_kwargs)
def assert_cannot_view(self, view, user=None, view_kwargs=None):
return self.assert_response(view, 401, user, view_kwargs)
def assert_unauthorized(self, view, user=None, view_kwargs=None):
return self.assert_response(view, 403, user, view_kwargs)
def assert_must_login(self, view, user=None, view_kwargs=None):
route = reverse(view, kwargs=view_kwargs)
res = self.get_view(route, user)
login_url = self.login_url + '?next=' + route
self.assertRedirects(res, login_url)
return res
############ upload media file #############
def test_anon_cantview_av_upload(self):
self.assert_must_login('oppia_av_upload')
def test_admin_canview_av_upload(self):
self.assert_can_view('oppia_av_upload', self.admin_user)
def test_staff_canview_av_upload(self):
self.assert_can_view('oppia_av_upload', self.staff_user)
def test_student_cantview_av_upload(self):
self.assert_unauthorized('oppia_av_upload', self.normal_user)
| gpl-3.0 | -4,483,428,020,845,946,400 | 33.514706 | 79 | 0.608862 | false |
kerimlcr/ab2017-dpyo | ornek/lollypop/lollypop-0.9.229/src/web.py | 1 | 7411 | # Copyright (c) 2014-2016 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject, GLib
from threading import Thread
from time import time
from lollypop.sqlcursor import SqlCursor
from lollypop.tagreader import TagReader
from lollypop.web_youtube import WebYouTube
from lollypop.web_jgm90 import WebJmg90
from lollypop.define import Lp, DbPersistent, Type
from lollypop.lio import Lio
class Web(GObject.Object):
"""
Web helper
"""
__gsignals__ = {
'saved': (GObject.SignalFlags.RUN_FIRST, None, (int,)),
'progress': (GObject.SignalFlags.RUN_FIRST, None, (float,))
}
    @staticmethod
    def play_track(track, play, callback):
"""
Play track
@param track as Track
@param play as bool
@param callback as func(uri: str, track: Track, play: bool)
"""
if track.is_jgm:
uri = WebJmg90.get_uri_content(track.uri)
elif track.is_youtube:
uri = WebYouTube.get_uri_content(track.uri)
else:
return
GLib.idle_add(callback, uri, track, play)
def __init__(self):
"""
Init helper
"""
GObject.Object.__init__(self)
self.__helpers = [WebJmg90(), WebYouTube()]
def save_track(self, item, persistent):
"""
Save item into collection as track
@param item as SearchItem
@param persistent as DbPersistent
"""
t = Thread(target=self.__save_track_thread, args=(item, persistent))
t.daemon = True
t.start()
def save_album(self, item, persistent):
"""
Save item into collection as album
@param item as SearchItem
@param persistent as DbPersistent
"""
t = Thread(target=self.__save_album_thread,
args=(item, persistent))
t.daemon = True
t.start()
#######################
# PRIVATE #
#######################
def __save_album_thread(self, item, persistent):
"""
Save item into collection as album
@param item as SearchItem
@param persistent as DbPersistent
"""
nb_items = len(item.subitems)
# Should not happen but happen :-/
if nb_items == 0:
return
start = 0
album_artist = item.subitems[0].artists[0]
album_id = None
for track_item in item.subitems:
(album_id, track_id) = self.__save_track(track_item, persistent,
album_artist)
if track_id is None:
continue
# Download cover
if start == 0:
t = Thread(target=self.__save_cover, args=(item, album_id))
t.daemon = True
t.start()
start += 1
GLib.idle_add(self.emit, "progress", start / nb_items)
GLib.idle_add(self.emit, "progress", 1)
if Lp().settings.get_value('artist-artwork'):
Lp().art.cache_artists_info()
if album_id is not None:
GLib.idle_add(self.emit, "saved", album_id)
def __save_track_thread(self, item, persistent):
"""
Save item into collection as track
@param item as SearchItem
@param persistent as DbPersistent
"""
album_artist = item.artists[0]
(album_id, track_id) = self.__save_track(item, persistent,
album_artist)
if track_id is None:
return
self.__save_cover(item, album_id)
if Lp().settings.get_value('artist-artwork'):
Lp().art.cache_artists_info()
GLib.idle_add(self.emit, "saved", track_id)
def __save_track(self, item, persistent, album_artist):
"""
Save item into collection as track
@param item as SearchItem
@param persistent as DbPersistent
@param album artist as str
@return (album id as int, track id as int)
"""
# Get uri from helpers
for helper in self.__helpers:
uri = helper.get_uri(item)
if uri:
break
        # Didn't find anything
if not uri:
return (None, None)
track_id = Lp().tracks.get_id_by_uri(uri)
# Check if track needs to be updated
if track_id is not None:
if Lp().tracks.get_persistent(track_id) == DbPersistent.NONE\
and persistent == DbPersistent.EXTERNAL:
Lp().tracks.set_persistent(track_id, DbPersistent.EXTERNAL)
return (None, None)
t = TagReader()
with SqlCursor(Lp().db) as sql:
            # Happens often with iTunes/Spotify
if album_artist not in item.artists:
item.artists.append(album_artist)
artists = "; ".join(item.artists)
artist_ids = t.add_artists(artists, album_artist, "")
album_artist_ids = t.add_album_artists(album_artist, "")
(album_id, new_album) = t.add_album(item.album,
album_artist_ids, "",
False, 0, 0, int(time()), True)
# FIXME: Check this, could move this in add_album()
if new_album:
Lp().albums.set_synced(album_id, Type.NONE)
if persistent == DbPersistent.CHARTS:
genre_ids = [Type.CHARTS]
new_artist_ids = []
else:
new_artist_ids = list(set(artist_ids) | set(album_artist_ids))
genre_ids = t.add_genres("Web", album_id)
# Add track to db
track_id = Lp().tracks.add(item.name, uri, item.duration,
0, item.discnumber, "", album_id,
item.year, 0, 0, 0, persistent)
t.update_track(track_id, artist_ids, genre_ids)
t.update_album(album_id, album_artist_ids, genre_ids, None)
sql.commit()
for genre_id in genre_ids:
GLib.idle_add(Lp().scanner.emit, 'genre-updated', genre_id, True)
for artist_id in new_artist_ids:
GLib.idle_add(Lp().scanner.emit, 'artist-updated', artist_id, True)
return (album_id, track_id)
def __save_cover(self, item, album_id):
"""
Save cover to store
@param item as SearchItem
@param album id as int
"""
f = Lio.File.new_for_uri(item.cover)
(status, data, tag) = f.load_contents(None)
if status:
Lp().art.save_album_artwork(data, album_id)
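# Example usage (sketch): save a SearchItem as an external track and react to
# the "saved" signal; `search_item` is assumed to come from Lollypop's search
# code elsewhere.
#
#     web = Web()
#     web.connect("saved", lambda _w, track_id: print("saved", track_id))
#     web.save_track(search_item, DbPersistent.EXTERNAL)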
| gpl-3.0 | 2,696,436,820,573,915,000 | 36.619289 | 79 | 0.54716 | false |
pybel/pybel | src/pybel/io/nodelink.py | 1 | 7238 | # -*- coding: utf-8 -*-
"""Conversion functions for BEL graphs with node-link JSON."""
import gzip
import json
from io import BytesIO
from itertools import chain, count
from operator import methodcaller
from typing import Any, Mapping, TextIO, Union
from networkx.utils import open_file
from .utils import ensure_version
from ..constants import (
ANNOTATIONS, CITATION, FUSION, GRAPH_ANNOTATION_CURIE, GRAPH_ANNOTATION_LIST, GRAPH_ANNOTATION_MIRIAM, MEMBERS,
PARTNER_3P,
PARTNER_5P, PRODUCTS, REACTANTS, SOURCE_MODIFIER, TARGET_MODIFIER,
)
from ..dsl import BaseEntity
from ..language import citation_dict
from ..struct import BELGraph
from ..struct.graph import _handle_modifier
from ..tokens import parse_result_to_dsl
from ..utils import hash_edge, tokenize_version
__all__ = [
'to_nodelink',
'to_nodelink_file',
'to_nodelink_gz',
'to_nodelink_jsons',
'from_nodelink',
'from_nodelink_file',
'from_nodelink_gz',
'from_nodelink_jsons',
'to_nodelink_gz_io',
'from_nodelink_gz_io',
]
def to_nodelink(graph: BELGraph) -> Mapping[str, Any]:
"""Convert this graph to a node-link JSON object.
:param graph: BEL Graph
"""
graph_json_dict = _to_nodelink_json_helper(graph)
_prepare_graph_dict(graph_json_dict['graph'])
return graph_json_dict
def _prepare_graph_dict(g):
# Convert annotation list definitions (which are sets) to canonicalized/sorted lists
g[GRAPH_ANNOTATION_LIST] = {
keyword: list(sorted(values))
for keyword, values in g.get(GRAPH_ANNOTATION_LIST, {}).items()
}
g[GRAPH_ANNOTATION_CURIE] = list(sorted(g[GRAPH_ANNOTATION_CURIE]))
g[GRAPH_ANNOTATION_MIRIAM] = list(sorted(g[GRAPH_ANNOTATION_MIRIAM]))
@open_file(1, mode='w')
def to_nodelink_file(graph: BELGraph, path: Union[str, TextIO], **kwargs) -> None:
"""Write this graph as node-link JSON to a file.
:param graph: A BEL graph
:param path: A path or file-like
"""
graph_json_dict = to_nodelink(graph)
json.dump(graph_json_dict, path, ensure_ascii=False, **kwargs)
def to_nodelink_gz(graph, path: str, **kwargs) -> None:
"""Write a graph as node-link JSON to a gzip file."""
with gzip.open(path, 'wt') as file:
json.dump(to_nodelink(graph), file, ensure_ascii=False, **kwargs)
def to_nodelink_jsons(graph: BELGraph, **kwargs) -> str:
"""Dump this graph as a node-link JSON object to a string."""
return json.dumps(to_nodelink(graph), ensure_ascii=False, **kwargs)
def from_nodelink(graph_json_dict: Mapping[str, Any], check_version: bool = True) -> BELGraph:
"""Build a graph from node-link JSON Object."""
pybel_version = tokenize_version(graph_json_dict['graph']['pybel_version'])
if pybel_version[1] < 14: # if minor version is less than 14
raise ValueError('Invalid NodeLink JSON from old version of PyBEL (v{}.{}.{})'.format(*pybel_version))
graph = _from_nodelink_json_helper(graph_json_dict)
return ensure_version(graph, check_version=check_version)
@open_file(0, mode='r')
def from_nodelink_file(path: Union[str, TextIO], check_version: bool = True) -> BELGraph:
"""Build a graph from the node-link JSON contained in the given file.
:param path: A path or file-like
"""
return from_nodelink(json.load(path), check_version=check_version)
def from_nodelink_gz(path: str) -> BELGraph:
"""Read a graph as node-link JSON from a gzip file."""
with gzip.open(path, 'rt') as file:
return from_nodelink(json.load(file))
def from_nodelink_jsons(graph_json_str: str, check_version: bool = True) -> BELGraph:
"""Read a BEL graph from a node-link JSON string."""
return from_nodelink(json.loads(graph_json_str), check_version=check_version)
def _to_nodelink_json_helper(graph: BELGraph) -> Mapping[str, Any]:
"""Convert a BEL graph to a node-link format.
:param graph: BEL Graph
Adapted from :func:`networkx.readwrite.json_graph.node_link_data`
"""
nodes = sorted(graph, key=methodcaller('as_bel'))
mapping = dict(zip(nodes, count()))
return {
'directed': True,
'multigraph': True,
'graph': graph.graph.copy(),
'nodes': [
_augment_node(node)
for node in nodes
],
'links': [
dict(
chain(
data.copy().items(),
[('source', mapping[u]), ('target', mapping[v]), ('key', key)],
),
)
for u, v, key, data in graph.edges(keys=True, data=True)
],
}
def _augment_node(node: BaseEntity) -> BaseEntity:
"""Add the SHA-512 identifier to a node's dictionary."""
rv = node.copy()
rv['id'] = node.md5
rv['bel'] = node.as_bel()
for m in chain(node.get(MEMBERS, []), node.get(REACTANTS, []), node.get(PRODUCTS, [])):
m.update(_augment_node(m))
if FUSION in node:
node[FUSION][PARTNER_3P].update(_augment_node(node[FUSION][PARTNER_3P]))
node[FUSION][PARTNER_5P].update(_augment_node(node[FUSION][PARTNER_5P]))
return rv
def _recover_graph_dict(graph: BELGraph):
graph.graph[GRAPH_ANNOTATION_LIST] = {
keyword: set(values)
for keyword, values in graph.graph.get(GRAPH_ANNOTATION_LIST, {}).items()
}
graph.graph[GRAPH_ANNOTATION_CURIE] = set(graph.graph.get(GRAPH_ANNOTATION_CURIE, []))
graph.graph[GRAPH_ANNOTATION_MIRIAM] = set(graph.graph.get(GRAPH_ANNOTATION_MIRIAM, []))
def _from_nodelink_json_helper(data: Mapping[str, Any]) -> BELGraph:
"""Return graph from node-link data format.
Adapted from :func:`networkx.readwrite.json_graph.node_link_graph`
"""
graph = BELGraph()
graph.graph = data.get('graph', {})
_recover_graph_dict(graph)
mapping = []
for node_data in data['nodes']:
node = parse_result_to_dsl(node_data)
graph.add_node_from_data(node)
mapping.append(node)
for data in data['links']:
u = mapping[data['source']]
v = mapping[data['target']]
edge_data = {
k: v
for k, v in data.items()
if k not in {'source', 'target', 'key'}
}
for side in (SOURCE_MODIFIER, TARGET_MODIFIER):
side_data = edge_data.get(side)
if side_data:
_handle_modifier(side_data)
if CITATION in edge_data:
edge_data[CITATION] = citation_dict(**edge_data[CITATION])
if ANNOTATIONS in edge_data:
edge_data[ANNOTATIONS] = graph._clean_annotations(edge_data[ANNOTATIONS])
graph.add_edge(u, v, key=hash_edge(u, v, edge_data), **edge_data)
return graph
def to_nodelink_gz_io(graph: BELGraph) -> BytesIO:
"""Get a BEL graph as a compressed BytesIO."""
bytes_io = BytesIO()
with gzip.GzipFile(fileobj=bytes_io, mode='w') as file:
s = to_nodelink_jsons(graph)
file.write(s.encode('utf-8'))
bytes_io.seek(0)
return bytes_io
def from_nodelink_gz_io(bytes_io: BytesIO) -> BELGraph:
"""Get BEL from gzipped nodelink JSON."""
with gzip.GzipFile(fileobj=bytes_io, mode='r') as file:
s = file.read()
j = s.decode('utf-8')
return from_nodelink_jsons(j)
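# Round-trip sketch (assumes `graph` is an in-memory BELGraph):
#
#     payload = to_nodelink_jsons(graph)
#     graph2 = from_nodelink_jsons(payload)
#     assert graph.number_of_nodes() == graph2.number_of_nodes()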
| mit | 2,647,336,991,651,230,700 | 31.168889 | 115 | 0.635811 | false |
seecr/meresco-examples | meresco/__init__.py | 1 | 1394 | ## begin license ##
#
# "Meresco Examples" is a project demonstrating some of the
# features of various components of the "Meresco Suite".
# Also see http://meresco.org.
#
# Copyright (C) 2007-2008 SURF Foundation. http://www.surf.nl
# Copyright (C) 2007-2010 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2007-2009 Stichting Kennisnet Ict op school. http://www.kennisnetictopschool.nl
# Copyright (C) 2009 Delft University of Technology http://www.tudelft.nl
# Copyright (C) 2009 Tilburg University http://www.uvt.nl
#
# This file is part of "Meresco Examples"
#
# "Meresco Examples" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Examples" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Examples"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| gpl-2.0 | -214,853,663,805,945,660 | 42.5625 | 95 | 0.738164 | false |
Mozu/mozu-python-sdk | mozurestsdk/platform/tenantextensions.py | 1 | 2242 |
"""
This code was generated by Codezu.
Changes to this file may cause incorrect behavior and will be lost if
the code is regenerated.
"""
from mozurestsdk.mozuclient import default as default_client
from mozurestsdk.mozuurl import MozuUrl;
from mozurestsdk.urllocation import UrlLocation
from mozurestsdk.apicontext import ApiContext;
class TenantExtensions(object):
def __init__(self, apiContext: ApiContext = None, mozuClient = None):
self.client = mozuClient or default_client();
if (apiContext is not None):
self.client.withApiContext(apiContext);
else:
self.client.withApiContext(ApiContext());
def getExtensions(self,responseFields = None):
""" Retrieves the Arc.js configuration settings for a site.
Args:
| responseFields (string) - Filtering syntax appended to an API call to increase or decrease the amount of data returned inside a JSON object. This parameter should only be used to retrieve data. Attempting to update data using this parameter may cause data loss.
Returns:
| TenantExtensions
Raises:
| ApiException
"""
url = MozuUrl("/api/platform/extensions/?responseFields={responseFields}", "GET", UrlLocation.TenantPod, False);
url.formatUrl("responseFields", responseFields);
self.client.withResourceUrl(url).execute();
return self.client.result();
def updateExtensions(self,extensions, responseFields = None):
""" Updates the Arc.js configuration settings for a site.
Args:
| extensions(extensions) - The updated details of the Arc.js configuration settings.
| responseFields (string) - Filtering syntax appended to an API call to increase or decrease the amount of data returned inside a JSON object. This parameter should only be used to retrieve data. Attempting to update data using this parameter may cause data loss.
Returns:
| TenantExtensions
Raises:
| ApiException
"""
url = MozuUrl("/api/platform/extensions/?responseFields={responseFields}", "PUT", UrlLocation.TenantPod, False);
url.formatUrl("responseFields", responseFields);
self.client.withResourceUrl(url).withBody(extensions).execute();
return self.client.result();
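# Example usage (sketch; building a valid ApiContext is environment-specific
# and the details here are assumptions):
#
#     resource = TenantExtensions(apiContext)
#     current = resource.getExtensions()
#     resource.updateExtensions(current)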
| apache-2.0 | -7,380,671,656,172,497,000 | 32.181818 | 266 | 0.727029 | false |
nigelb/SerialGrabber | examples/MQTT/SerialGrabber_Settings.py | 1 | 2043 | #!/usr/bin/env python
# SerialGrabber reads data from a serial port and processes it with the
# configured processor.
# Copyright (C) 2012 NigelB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import serial
from serial_grabber.extractors import TransactionExtractor
from serial_grabber.reader.SerialReader import SerialReader
from serial_grabber.processor.UploadProcessor import UploadProcessor
from serial_grabber.processor import CompositeProcessor
from serial_grabber.mqtt import MqttCommander
from serial_grabber.connections import SerialConnection
# Serial Settings
timeout = 1
port = "/dev/ttyUSB0"
baud = 57600
parity = serial.PARITY_NONE
stop_bits = 1
# MQTT settings
mqtt_host = "localhost"
mqtt_port = 1883
mqtt_auth = ('system', 'manager')
# Settings
cache_collision_avoidance_delay = 1
processor_sleep = 1
watchdog_sleep = 1
reader_error_sleep = 1
drop_carriage_return = True
transaction = TransactionExtractor("default", "BEGIN DATA", "END DATA")
reader = SerialReader(transaction,
1000,
SerialConnection(port, baud, timeout=timeout,
parity=parity, stop_bits=stop_bits))
commander = MqttCommander(mqtt_host, mqtt_port, mqtt_auth)
uploadProcessor = UploadProcessor("https://example.org/cgi-bin/upload.py")
processor = CompositeProcessor([commander.processor, uploadProcessor])
| gpl-2.0 | 1,560,382,958,900,706,000 | 33.05 | 75 | 0.751836 | false |
lqmanh/daethon | test_daethon.py | 1 | 1817 | import os
import sys
import time
import pytest
from daethon import Daemon
class TDaemon(Daemon):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
with open('testing_daemon', 'w') as f:
f.write('inited')
def run(self):
time.sleep(1)
with open('testing_daemon', 'w') as f:
f.write('finished')
def file_contains(path, s):
with open(path) as f:
return f.read() == s
def control_daemon(action):
os.system(' '.join((sys.executable, __file__, action)))
@pytest.fixture
def context():
control_daemon('start')
time.sleep(0.5)
yield
if os.path.exists('testing_daemon.pid'):
control_daemon('stop')
time.sleep(0.5)
os.system('rm testing_daemon*') # clean up files if necessary
def test_daemon_can_start(context):
assert os.path.exists('testing_daemon.pid')
assert file_contains('testing_daemon', 'inited')
def test_daemon_can_stop(context):
control_daemon('stop')
time.sleep(0.5)
assert not os.path.exists('testing_daemon.pid')
assert file_contains('testing_daemon', 'inited')
def test_daemon_can_finish(context):
time.sleep(1)
assert not os.path.exists('testing_daemon.pid')
assert file_contains('testing_daemon', 'finished')
def test_daemon_can_restart(context):
with open('testing_daemon.pid') as f:
pid1 = f.read()
time.sleep(0.5)
control_daemon('restart')
time.sleep(0.5)
with open('testing_daemon.pid') as f:
pid2 = f.read()
assert pid1 != pid2
if __name__ == '__main__':
if len(sys.argv) == 2:
arg = sys.argv[1]
if arg in ('start', 'stop', 'restart'):
d = TDaemon('testing_daemon.pid', verbose=0)
getattr(d, arg)()
else:
pytest.main()
| apache-2.0 | -3,047,666,419,461,129,700 | 22.597403 | 66 | 0.603192 | false |
msteinhoff/foption-bot | src/python/core/messages.py | 1 | 2244 | # -*- coding: UTF-8 -*-
"""
$Id$
$URL$
Copyright (c) 2010 foption
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@since Jan 6, 2011
@author Mario Steinhoff
This file contains all messages with associated message numbers that are
used throughout the whole project. When defining new messages, please use
named parameters wherever possible.
Currently, the following number ranges are defined:
00000-09999: Core
00001-00999: Bot
01000-01999: Config
10000-19999: Modules
20000-29999: Interaction
20001-20200: IRC
"""
__version__ = '$Rev$'
__all__ = [
'message'
]
message = {}
message[1000] = 'configuration saved'
message[1001] = 'unable to save configuration'
message[1002] = 'configuration loaded'
message[1003] = 'unable to load configuration: no config file was found'
message[20001] = ''
message[20002] = ''
message[20003] = ''
message[20005] = ''
message[20006] = ''
message[20007] = ''
message[20008] = ''
message[20009] = ''
message[20010] = ''
message[20011] = ''
message[20012] = ''
message[20013] = ''
message[20014] = ''
message[20015] = ''
message[20016] = ''
#reply.add('deine mutter hat gefailed.')
#return "OHFUCKOHFUCKOHFUCK Etwas lief schief! Datenbankfehler"
#return "Error 555!"
#reply.add('Deine Mutter hat die Datenbank gefressen')
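# Example of a message with named parameters (hypothetical id and text):
#
# message[1004] = 'unable to write to %(path)s'
# message[1004] % {'path': '/etc/bot.conf'}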
| mit | 6,695,913,913,734,353,000 | 27.769231 | 77 | 0.750446 | false |
logicabrity/aeon | test/test_measurement.py | 1 | 1229 | import time
import pytest
from aeon.measurement import Measurement
from aeon.errors import InvalidMeasurementState
def test_cant_start_measurement_twice():
m = Measurement("name", "group")
m.start()
with pytest.raises(InvalidMeasurementState):
m.start()
def test_cant_stop_measurement_before_starting_it():
m = Measurement("name", "group")
with pytest.raises(InvalidMeasurementState):
m.stop()
def test_cant_stop_measurement_twice():
m = Measurement("name", "group")
m.start()
m.stop()
with pytest.raises(InvalidMeasurementState):
m.stop()
def test_starting_measurement_increases_number_of_calls():
m = Measurement("name", "group")
assert m.calls == 0
m.start()
assert m.calls == 1
def test_measurement_measures_something():
m = Measurement("name", "group")
m.start()
time.sleep(1e-3)
m.stop()
elapsed = m.total_runtime
assert elapsed > 0
m.start()
time.sleep(1e-3)
m.stop()
elapsed_again = m.total_runtime
assert elapsed_again > elapsed
@pytest.mark.fixed
def test_measurement_has_name_and_group():
m = Measurement("name", "group")
assert m.name == "name"
assert m.group == "group"
| mit | -1,662,716,423,659,156,000 | 21.345455 | 58 | 0.656631 | false |
maas/maas | src/maasserver/middleware.py | 1 | 18463 | # Copyright 2012-2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Access middleware."""
import http.client
import json
import logging
from pprint import pformat
import sys
import traceback
import attr
from crochet import TimeoutError
from django.conf import settings
from django.core.exceptions import PermissionDenied, ValidationError
from django.core.handlers.exception import get_exception_response
from django.http import (
Http404,
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseRedirect,
)
from django.urls import get_resolver, get_urlconf, reverse
from django.utils.encoding import force_str
from django.utils.http import urlquote_plus
from maasserver import logger
from maasserver.clusterrpc.utils import get_error_message_for_exception
from maasserver.components import (
discard_persistent_error,
register_persistent_error,
)
from maasserver.enum import COMPONENT
from maasserver.exceptions import MAASAPIException
from maasserver.models.config import Config
from maasserver.models.node import RackController
from maasserver.rbac import rbac
from maasserver.rpc import getAllClients
from maasserver.utils.orm import is_retryable_failure
from provisioningserver.rpc.exceptions import (
NoConnectionsAvailable,
PowerActionAlreadyInProgress,
)
from provisioningserver.utils.shell import ExternalProcessError
# 'Retry-After' header sent for httplib.SERVICE_UNAVAILABLE
# responses.
RETRY_AFTER_SERVICE_UNAVAILABLE = 10
PUBLIC_URL_PREFIXES = [
# Login page: must be visible to anonymous users.
reverse("login"),
# Authentication: must be visible to anonymous users.
reverse("authenticate"),
reverse("discharge-request"),
# CSRF: only usable by logged in users, but returns FORBIDDEN instead of
# a redirect to the login page on request of an unauthenticated user.
reverse("csrf"),
# The combo loaders are publicly accessible.
reverse("robots"),
# Metadata service is for use by nodes; no login.
reverse("metadata"),
# RPC information is for use by rack controllers; no login.
reverse("rpc-info"),
# Prometheus metrics with usage stats
reverse("metrics"),
# API meta-information is publicly visible.
reverse("api_version"),
reverse("api_v1_error"),
# API calls are protected by piston.
settings.API_URL_PREFIX,
# Boot resources simple streams endpoint; no login.
settings.SIMPLESTREAMS_URL_PREFIX,
]
def is_public_path(path):
"""Whether a request.path is publicly accessible."""
return any(path.startswith(prefix) for prefix in PUBLIC_URL_PREFIXES)
class AccessMiddleware:
"""Protect access to views.
Most UI views are visible only to logged-in users, but there are pages
that are accessible to anonymous users (e.g. the login page!) or that
use other authentication (e.g. the MAAS API, which is managed through
piston).
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if is_public_path(request.path):
return self.get_response(request)
if request.user.is_anonymous:
return HttpResponseRedirect(
"/MAAS/?next=%s" % urlquote_plus(request.path)
)
return self.get_response(request)
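# Illustrative sketch (assumption, not from this module): AccessMiddleware is
# meant to be wired into Django's MIDDLEWARE setting alongside the other
# classes below; the exact ordering MAAS ships with may differ.
#
#     MIDDLEWARE = [
#         "django.contrib.sessions.middleware.SessionMiddleware",
#         "django.contrib.auth.middleware.AuthenticationMiddleware",
#         "maasserver.middleware.AccessMiddleware",
#         "maasserver.middleware.ExceptionMiddleware",
#         ...
#     ]
#
# Once installed, any request whose path is not matched by is_public_path()
# is redirected to /MAAS/?next=<url-quoted path> until the user logs in.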
class ExternalComponentsMiddleware:
"""Middleware to check external components at regular intervals."""
def __init__(self, get_response):
self.get_response = get_response
def _check_rack_controller_connectivity(self):
"""Check each rack controller to see if it's connected.
If any rack controllers are disconnected, add a persistent error.
"""
controllers = RackController.objects.all()
connected_ids = {client.ident for client in getAllClients()}
disconnected_controllers = {
controller
for controller in controllers
if controller.system_id not in connected_ids
}
if len(disconnected_controllers) == 0:
discard_persistent_error(COMPONENT.RACK_CONTROLLERS)
else:
if len(disconnected_controllers) == 1:
message = (
"One rack controller is not yet connected to the region"
)
else:
message = (
"%d rack controllers are not yet connected to the region"
% len(disconnected_controllers)
)
message = (
'%s. Visit the <a href="/MAAS/l/controllers">'
"rack controllers page</a> for "
"more information." % message
)
register_persistent_error(COMPONENT.RACK_CONTROLLERS, message)
def __call__(self, request):
# This middleware hijacks the request to perform checks. Any
# error raised during these checks should be caught to avoid
# disturbing the handling of the request. Proper error reporting
# should be handled in the check method itself.
self._check_rack_controller_connectivity()
return self.get_response(request)
class ExceptionMiddleware:
"""Convert exceptions into appropriate HttpResponse responses.
For example, a MAASAPINotFound exception processed by a middleware
based on this class will result in an http 404 response to the client.
Validation errors become "bad request" responses.
.. middleware: https://docs.djangoproject.com
/en/dev/topics/http/middleware/
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
try:
return self.get_response(request)
except Exception as exception:
response = self.process_exception(request, exception)
if response:
return response
else:
raise
def process_exception(self, request, exception):
encoding = "utf-8"
if isinstance(exception, MAASAPIException):
# Print a traceback if this is a 500 error.
if (
settings.DEBUG
or exception.api_error == http.client.INTERNAL_SERVER_ERROR
):
self.log_exception(exception)
# This type of exception knows how to translate itself into
# an http response.
return exception.make_http_response()
elif isinstance(exception, ValidationError):
if settings.DEBUG:
self.log_exception(exception)
if hasattr(exception, "message_dict"):
# Complex validation error with multiple fields:
# return a json version of the message_dict.
return HttpResponseBadRequest(
json.dumps(exception.message_dict),
content_type="application/json",
)
else:
# Simple validation error: return the error message.
return HttpResponseBadRequest(
str("".join(exception.messages)).encode(encoding),
content_type="text/plain; charset=%s" % encoding,
)
elif isinstance(exception, PermissionDenied):
if settings.DEBUG:
self.log_exception(exception)
return HttpResponseForbidden(
content=str(exception).encode(encoding),
content_type="text/plain; charset=%s" % encoding,
)
elif isinstance(exception, ExternalProcessError):
# Catch problems interacting with processes that the
# appserver spawns, e.g. rndc.
#
# While this is a serious error, it should be a temporary
# one as the admin should be checking and fixing, or it
# could be spurious. There's no way of knowing, so the best
# course of action is to ask the caller to repeat.
if settings.DEBUG:
self.log_exception(exception)
response = HttpResponse(
content=str(exception).encode(encoding),
status=int(http.client.SERVICE_UNAVAILABLE),
content_type="text/plain; charset=%s" % encoding,
)
response["Retry-After"] = RETRY_AFTER_SERVICE_UNAVAILABLE
return response
elif isinstance(exception, Http404):
if settings.DEBUG:
self.log_exception(exception)
return get_exception_response(
request, get_resolver(get_urlconf()), 404, exception
)
elif is_retryable_failure(exception):
# We never handle retryable failures.
return None
elif isinstance(exception, SystemExit):
return None
else:
# Print a traceback.
self.log_exception(exception)
# Return an API-readable "Internal Server Error" response.
return HttpResponse(
content=str(exception).encode(encoding),
status=int(http.client.INTERNAL_SERVER_ERROR),
content_type="text/plain; charset=%s" % encoding,
)
def log_exception(self, exception):
exc_info = sys.exc_info()
logger.error(" Exception: %s ".center(79, "#") % str(exception))
logger.error("".join(traceback.format_exception(*exc_info)))
class DebuggingLoggerMiddleware:
log_level = logging.DEBUG
def __init__(self, get_response):
self.get_response = get_response
# Taken straight out of Django 1.8 django.http.request module to improve
# our debug output on requests (dropped in Django 1.9).
@classmethod
def _build_request_repr(
self,
request,
path_override=None,
GET_override=None,
POST_override=None,
COOKIES_override=None,
META_override=None,
):
"""
Builds and returns the request's representation string. The request's
attributes may be overridden by pre-processed values.
"""
# Since this is called as part of error handling, we need to be very
# robust against potentially malformed input.
try:
get = (
pformat(GET_override)
if GET_override is not None
else pformat(request.GET)
)
except Exception:
get = "<could not parse>"
try:
post = (
pformat(POST_override)
if POST_override is not None
else pformat(request.POST)
)
except Exception:
post = "<could not parse>"
try:
cookies = (
pformat(COOKIES_override)
if COOKIES_override is not None
else pformat(request.COOKIES)
)
except Exception:
cookies = "<could not parse>"
try:
meta = (
pformat(META_override)
if META_override is not None
else pformat(request.META)
)
except Exception:
meta = "<could not parse>"
path = path_override if path_override is not None else request.path
name = request.__class__.__name__
return force_str(
f"<{name}\npath:{path},\nGET:{get},\nPOST:{post},\nCOOKIES:{cookies},\nMETA:{meta}>"
)
def __call__(self, request):
if settings.DEBUG_HTTP and logger.isEnabledFor(self.log_level):
header = " Request dump ".center(79, "#")
logger.log(
self.log_level,
"%s\n%s",
header,
self._build_request_repr(request),
)
response = self.get_response(request)
if settings.DEBUG_HTTP and logger.isEnabledFor(self.log_level):
header = " Response dump ".center(79, "#")
content = getattr(response, "content", "{no content}")
try:
decoded_content = content.decode("utf-8")
except UnicodeDecodeError:
logger.log(
self.log_level,
"%s\n%s",
header,
"** non-utf-8 (binary?) content **",
)
else:
logger.log(self.log_level, "%s\n%s", header, decoded_content)
return response
class RPCErrorsMiddleware:
"""A middleware for handling RPC errors."""
handled_exceptions = (
NoConnectionsAvailable,
PowerActionAlreadyInProgress,
TimeoutError,
)
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
try:
return self.get_response(request)
except Exception as exception:
response = self.process_exception(request, exception)
if response:
return response
else:
raise
def process_exception(self, request, exception):
if request.path.startswith(settings.API_URL_PREFIX):
# Not a path we're handling exceptions for.
# APIRPCErrorsMiddleware handles all the API request RPC
# errors.
return None
if not isinstance(exception, self.handled_exceptions):
# Nothing to do, since we don't care about anything other
# than handled_exceptions.
return None
logging.exception(exception)
return HttpResponseRedirect(request.path)
class APIRPCErrorsMiddleware(RPCErrorsMiddleware):
"""A middleware for handling RPC errors in API requests."""
handled_exceptions = {
NoConnectionsAvailable: int(http.client.SERVICE_UNAVAILABLE),
PowerActionAlreadyInProgress: int(http.client.SERVICE_UNAVAILABLE),
TimeoutError: int(http.client.GATEWAY_TIMEOUT),
}
def process_exception(self, request, exception):
if not request.path.startswith(settings.API_URL_PREFIX):
# Not a path we're handling exceptions for.
# RPCErrorsMiddleware handles non-API requests.
return None
if exception.__class__ not in self.handled_exceptions:
# This isn't something we handle; allow processing to
# continue.
return None
status = self.handled_exceptions[exception.__class__]
logging.exception(exception)
error_message = get_error_message_for_exception(exception)
encoding = "utf-8"
response = HttpResponse(
content=error_message.encode(encoding),
status=status,
content_type="text/plain; charset=%s" % encoding,
)
if status == http.client.SERVICE_UNAVAILABLE:
response["Retry-After"] = RETRY_AFTER_SERVICE_UNAVAILABLE
return response
class CSRFHelperMiddleware:
"""A Middleware to decide whether a request needs to be protected against
CSRF attacks.
Requests with a session cookie (i.e. requests for which the basic
    session-based Django authentication is used) will be CSRF protected.
Requests without this cookie are pure 0-legged API requests and thus don't
need to use the CSRF protection machinery because each request is signed.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
session_cookie = request.COOKIES.get(
settings.SESSION_COOKIE_NAME, None
)
if session_cookie is None:
# csrf_processing_done is a field used by Django. We use it here
# to bypass the CSRF protection when it's not needed (i.e. when the
# request is OAuth-authenticated).
request.csrf_processing_done = True
return self.get_response(request)
@attr.s
class ExternalAuthInfo:
"""Hold information about external authentication."""
type = attr.ib()
url = attr.ib()
domain = attr.ib(default="")
admin_group = attr.ib(default="")
class ExternalAuthInfoMiddleware:
"""A Middleware adding information about the external authentication.
This adds an `external_auth_info` attribute to the request, which is an
ExternalAuthInfo instance if external authentication is enabled, None
otherwise.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
configs = Config.objects.get_configs(
[
"external_auth_url",
"external_auth_domain",
"external_auth_admin_group",
"rbac_url",
]
)
rbac_endpoint = configs.get("rbac_url")
candid_endpoint = configs.get("external_auth_url")
auth_endpoint, auth_domain, auth_admin_group = "", "", ""
if rbac_endpoint:
auth_type = "rbac"
auth_endpoint = rbac_endpoint.rstrip("/") + "/auth"
elif candid_endpoint:
auth_type = "candid"
auth_endpoint = candid_endpoint
auth_domain = configs.get("external_auth_domain")
auth_admin_group = configs.get("external_auth_admin_group")
auth_info = None
if auth_endpoint:
# strip trailing slashes as js-bakery ends up using double slashes
# in the URL otherwise
auth_info = ExternalAuthInfo(
type=auth_type,
url=auth_endpoint.rstrip("/"),
domain=auth_domain,
admin_group=auth_admin_group,
)
request.external_auth_info = auth_info
return self.get_response(request)
class RBACMiddleware:
"""Middleware that cleans the RBAC thread-local cache.
At the end of each request the RBAC client that is held in the thread-local
needs to be cleaned up. That way the next request on the same thread will
use a new RBAC client.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
result = self.get_response(request)
# Now that the response has been handled, clear the thread-local
# state of the RBAC connection.
rbac.clear()
return result
| agpl-3.0 | -8,170,894,622,841,870,000 | 34.505769 | 96 | 0.611222 | false |
ivanamihalek/tcga | icgc/60_nextgen_production/65_reactome_tree.py | 1 | 5057 | #! /usr/bin/python3
#
# This source code is part of icgc, an ICGC processing pipeline.
#
# Icgc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Icgc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see<http://www.gnu.org/licenses/>.
#
# Contact: [email protected]
#
# some pathways do not have the associated genes listed, probably by mistake
# examples:
# R-HSA-1483171 | Synthesis of BMP
# R-HSA-2408499 | Formation of selenosugars for excretion
from icgc_utils.common_queries import quotify
from icgc_utils.reactome import *
from config import Config
############
def print_genes(cursor, gene_ids, depth):
if len(gene_ids)<1:
print("\t"*depth, "no genes listed")
return
#print("\t"*depth, "print genes here")
gene_id_string = ",".join([quotify(z) for z in gene_ids])
qry = "select ensembl_gene_id, approved_name from hgnc where ensembl_gene_id in (%s)" % gene_id_string
gene_names = dict(hard_landing_search(cursor, qry))
qry = "select ensembl_gene_id, approved_symbol from hgnc where ensembl_gene_id in (%s)" % gene_id_string
gene_symbols = dict(hard_landing_search(cursor, qry))
for gene in gene_ids:
print("\t"*depth, gene_symbols.get(gene,""), gene_names.get(gene,""))
return
##############
def characterize_subtree(cursor, graph, pthwy_id, gene_groups, depth, verbose=True):
# this is the whole subtree
# children = [node for node in nx.dfs_preorder_nodes(graph, pthwy_id)]
# A successor of n is a node m such that there exists a directed edge from n to m.
children = [node for node in graph.successors(pthwy_id)]
if len(children)==0: return False
node_id_string = ",".join([quotify(z) for z in children])
qry_template = "select * from reactome_pathways where reactome_pathway_id in (%s)"
children_names = hard_landing_search(cursor, qry_template % node_id_string)
for child_id, child_name in children_names:
# number_of_genes = genes related to nodes without descendants
genes = genes_in_subgraph(cursor, graph, child_id)
if verbose: print("\t"*depth, child_id, child_name, len(genes))
if len(genes)<100:
if verbose: print_genes(cursor, genes, depth+1)
gene_groups[child_name] = genes
continue
if not characterize_subtree(cursor, graph, child_id, gene_groups, depth+1, verbose=verbose): # no further subdivisions
if verbose: print_genes(cursor, genes, depth+1)
gene_groups[child_name] = genes
continue
return True
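# Note (inferred, not from this file): genes_in_subgraph() is star-imported
# from icgc_utils.reactome above; from its use here it is assumed to return
# the list of ENSEMBL gene ids annotated to the given pathway node and to all
# pathways reachable from it in the directed reactome graph. The recursion in
# characterize_subtree() therefore keeps subdividing a pathway until each
# leaf-level group carries fewer than 100 genes, and stores those groups in
# gene_groups keyed by pathway name.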
#########################################
import numpy as np
from matplotlib import pyplot as plt
def hist_plot(gene_groups):
data = [len(gene_list) for gene_list in list(gene_groups.values())]
# fixed bin size
bins = np.arange(0, 505, 5) # fixed bin size
plt.xlim(0,500)
plt.hist(data, bins=bins, alpha=0.5)
# plt.title('')
plt.xlabel('number of genes in group (bin size = 5)')
plt.ylabel('number of groups')
#
plt.show()
####################################################
def main():
verbose = False
db = connect_to_mysql(Config.mysql_conf_file)
cursor = db.cursor()
switch_to_db(cursor, 'icgc')
# are there children with multiple parents? Yes. So I need some kind of
	# directed graph, rather than a tree.
qry = "select child, count(distinct parent) as ct from reactome_hierarchy "
qry += "group by child having ct>1"
ret = search_db(cursor, qry)
print("number of children with multiple parents:", len(ret))
# feed the parent/child pairs as edges into graph
graph = build_reactome_graph(cursor, verbose=True)
# candidate roots
zero_in_degee_nodes = get_roots(graph)
node_id_string = ",".join([quotify(z) for z in zero_in_degee_nodes])
qry_template = "select * from reactome_pathways where reactome_pathway_id in (%s)"
root_names = hard_landing_search(cursor, qry_template% node_id_string)
gene_groups = {}
for pthwy_id, name in root_names:
if "disease" in name.lower(): continue
if verbose: print(pthwy_id, name)
characterize_subtree(cursor, graph, pthwy_id, gene_groups, 1, verbose=verbose)
print("\n===========================")
max_group=0
for group, genes in gene_groups.items():
groupsize = len(genes)
if max_group< groupsize: max_group=groupsize
print (group, len(genes))
print("\n===========================")
print("number of groups", len(gene_groups))
print("largest group", max_group)
print("\n===========================")
for pthwy_name, genes in gene_groups.items():
if len(genes)<=150: continue
print("\n",pthwy_name, len(genes))
#print_genes(cursor, genes, 1)
#hist_plot(gene_groups)
cursor.close()
db.close()
#########################################
if __name__ == '__main__':
main()
| gpl-3.0 | -438,034,926,294,668,700 | 35.121429 | 120 | 0.677477 | false |
soccermetrics/marcotti-mls | marcottimls/models/financial.py | 1 | 5658 | from sqlalchemy import Column, Integer, String, Sequence, ForeignKey, ForeignKeyConstraint, Boolean
from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import CheckConstraint
import enums
from common import BaseSchema
class AcquisitionPaths(BaseSchema):
"""
MLS player acquisition data model.
    Captures **initial** entry path into the league.
"""
__tablename__ = 'acquisitions'
player_id = Column(Integer, ForeignKey('players.id'), primary_key=True)
year_id = Column(Integer, ForeignKey('years.id'), primary_key=True)
path = Column(enums.AcquisitionType.db_type())
discriminator = Column('type', String(20))
club_id = Column(Integer, ForeignKey('clubs.id'))
club = relationship('Clubs', backref=backref('acquisitions'))
player = relationship('Players', backref=backref('entry'))
year = relationship('Years', backref=backref('acquisitions'))
__mapper_args__ = {
'polymorphic_identity': 'acquisitions',
'polymorphic_on': discriminator
}
class PlayerDrafts(AcquisitionPaths):
"""
Player draft data model.
"""
__mapper_args__ = {'polymorphic_identity': 'draft'}
round = Column(Integer, CheckConstraint('round > 0'))
selection = Column(Integer, CheckConstraint('selection > 0'))
gen_adidas = Column(Boolean, default=False)
def __repr__(self):
return u"<PlayerDraft(name={0}, year={1}, round={2}, selection={3}, generation_adidas={4})>".format(
self.player.full_name, self.year.yr, self.round, self.selection, self.gen_adidas).encode('utf-8')
def __unicode__(self):
return u"<PlayerDraft(name={0}, year={1}, round={2}, selection={3}, generation_adidas={4})>".format(
self.player.full_name, self.year.yr, self.round, self.selection, self.gen_adidas)
class PlayerSalaries(BaseSchema):
"""
Player salary data model.
"""
__tablename__ = 'salaries'
__table_args__ = (
ForeignKeyConstraint(
['competition_id', 'season_id'],
['competition_seasons.competition_id', 'competition_seasons.season_id'],
),
)
id = Column(Integer, Sequence('salary_id_seq', start=10000), primary_key=True)
base_salary = Column(Integer, CheckConstraint('base_salary >= 0'), doc="Base salary in cents")
avg_guaranteed = Column(Integer, CheckConstraint('avg_guaranteed >= 0'),
doc="Average annualized guaranteed compensation in cents")
player_id = Column(Integer, ForeignKey('players.id'))
club_id = Column(Integer, ForeignKey('clubs.id'))
competition_id = Column(Integer)
season_id = Column(Integer)
player = relationship('Players', backref=backref('salaries'))
club = relationship('Clubs', backref=backref('payroll'))
comp_season = relationship('CompetitionSeasons', backref=backref('payroll'))
def __repr__(self):
return u"<PlayerSalary(name={0}, club={1}, competition={2}, season={3}, base={4:.2f}, " \
u"guaranteed={5:.2f})>".format(self.player.full_name, self.club.name,
self.comp_season.competition.name, self.comp_season.season.name,
self.base_salary/100.00, self.avg_guaranteed/100.00).encode('utf-8')
def __unicode__(self):
return u"<PlayerSalary(name={0}, club={1}, competition={2}, season={3}, base={4:.2f}, " \
u"guaranteed={5:.2f})>".format(self.player.full_name, self.club.name,
self.comp_season.competition.name, self.comp_season.season.name,
self.base_salary / 100.00, self.avg_guaranteed / 100.00)
class PartialTenures(BaseSchema):
"""
Data model that captures player's partial-season tenure at a club.
"""
__tablename__ = 'partials'
__table_args__ = (
ForeignKeyConstraint(
['competition_id', 'season_id'],
['competition_seasons.competition_id', 'competition_seasons.season_id'],
),
)
id = Column(Integer, Sequence('partial_id_seq', start=10000), primary_key=True)
start_week = Column(Integer, CheckConstraint('start_week > 0'))
end_week = Column(Integer, CheckConstraint('end_week > 0'))
player_id = Column(Integer, ForeignKey('players.id'))
club_id = Column(Integer, ForeignKey('clubs.id'))
competition_id = Column(Integer)
season_id = Column(Integer)
player = relationship('Players', backref=backref('partials'))
club = relationship('Clubs', backref=backref('partials'))
comp_season = relationship('CompetitionSeasons', backref=backref('partials'))
def __repr__(self):
return u"<PartialTenure(name={0}, club={1}, competition={2}, season={3}, " \
u"start_week={4}, end_week={5})>".format(self.player.full_name, self.club.name,
self.comp_season.competition.name,
self.comp_season.season.name,
self.start_week, self.end_week).encode('utf-8')
def __unicode__(self):
return u"<PartialTenure(name={0}, club={1}, competition={2}, season={3}, " \
u"start_week={4}, end_week={5})>".format(self.player.full_name, self.club.name,
self.comp_season.competition.name,
self.comp_season.season.name,
self.start_week, self.end_week)
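# Illustrative usage sketch (not part of the original module). Assuming a
# configured SQLAlchemy session and existing Players/Years/Clubs rows, a
# draft entry could be recorded like this; the enum member name is an
# assumption based on enums.AcquisitionType above:
#
#     draft = PlayerDrafts(player_id=player.id, year_id=year.id,
#                          club_id=club.id, path=enums.AcquisitionType.draft,
#                          round=2, selection=27, gen_adidas=False)
#     session.add(draft)
#     session.commit()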
| mit | -645,660,621,350,676,700 | 42.19084 | 114 | 0.59597 | false |
koepferl/FluxCompensator | fluxcompensator/cube.py | 1 | 25142 | from copy import deepcopy
import os
ROOT = os.path.dirname(os.path.abspath(__file__)) + '/'
from astropy import log as logger
from astropy.io import fits
import numpy as np
from numpy.random import normal
from .psf import GaussianPSF, FilePSF, FunctionPSF
from .filter import Filter
from .utils.plot import MakePlots
from .utils.resolution import ConservingZoom, central
from .utils.tools import properties, grid_units, get_slices, average_collapse, central_wav
from .utils.units import ConvertUnits
# submitting PhD thesis today :)
class SyntheticCube(object):
'''
    SyntheticCube is part of the FluxCompensator. It converts
input_arrays (e. g. HYPERION ModelOutput) to "realistic"
synthetic observations (e.g. accounts for PSF, filters & noise).
It contains attributes like ModelOutput (see Notes).
If input_array is already a SyntheticCube object, the attributes are
passed. If input_array is not a SyntheticCube object, SyntheticCube
specific attributes are defined and then passed.
Parameters
----------
input_array : SyntheticCube, ModelOutput, optional
input_array also reads arrays with ModelOutput like properties.
unit_out : str, optional
The output units for SyntheticCube val. Valid options are:
* ``'ergs/cm^2/s'``
* ``'ergs/cm^2/s/Hz'``
* ``'Jy'``
* ``'mJy'``
* ``'MJy/sr'``
The default is ``'ergs/cm^2/s'``.
name : str
The name of the FluxCompensator object until another
input_array is called. The default is ``None``.
Attributes
----------
wav : numpy.ndarray
The wavelengths of val cube slices in microns.
val : numpy.ndarray
The 3D cube with shape (x, y, wav).
units : str
Current units of the val cube.
distance : str
Distance to the observed object in cm.
x_min : float
Physical offset from axis origin in FOV in cm.
x_max : float
Physical offset from axis origin in FOV in cm.
y_min : float
Physical offset from axis origin in FOV in cm.
y_max : float
Physical offset from axis origin in FOV in cm.
lon_min : float
Minimal longitudinal angle.
lon_max : float
Maximal longitudinal angle.
lat_min : float
Minimal latitudinal angle.
lat_max : float
Maximal latitudinal angle.
pix_area_sr : float
Pixel area per sr.
Notes
-----
unit_in : str
Unit of val in input_array. Valid options are:
* ``'ergs/cm^2/s'``
* ``'ergs/cm^2/s/Hz'``
* ``'Jy'``
* ``'mJy'``
* ``'MJy/sr'``
grid_unit : float
Physical unit of FOV axis in cm. Valid options are:
* ``au`` in cm
* ``pc`` in cm
* ``kpc`` in cm
grid_unit_name
Astronomical unit of FOV axis. Valid options are:
* ``'au'``
* ``'pc'``
* ``'kpc'``
FOV : tuple
Tuple ``FOV(x,y)`` of Field of View pixel entries.
* pixel in x direction: ``FOV[0]``
* pixel in y direction: ``FOV[1]``
name : str
The name of the FluxCompensator object until another
input_array is called. The default is ``None``.
stage : str
Gives current operation stage of SyntheticCube.
E. g. ``'SyntheticCube: convolve_filter'``
log : list
List of strings of the previous and current stages.
filter : dict
Dictionary ``filter = {name, waf_0, waf_min, waf_max}``
of the applied filter.
* name of filter: ``filter['name']``
* central wavelength: ``filter['waf_0']``
* minimal wavelength: ``filter['waf_min']``
* maximal wavelength: ``filter['waf_max']``
Returns
-------
cube : SyntheticCube
3D val array with SyntheticCube properties.
image : SyntheticImage
2D val array with SyntheticImage properties.
sed : SyntheticSED
1D val array (collapsed rough SED) with SyntheticSED properties.
flux : SyntheticFlux
0D val array (scalar) with SyntheticFlux properties.
'''
def __init__(self, input_array, unit_out='ergs/cm^2/s', name=None):
# Hyperion ModelOutput attributes
#if input_array.val.ndim == 3:
self.val = np.array(deepcopy(input_array.val))
#else:
# raise Exception('input_array does not have the right dimensions. numpy array of (x, y, wav) is required.')
self.wav = np.array(deepcopy(input_array.wav))
self.units = input_array.units
self.distance = input_array.distance
self.x_max = input_array.x_max
self.x_min = input_array.x_min
self.y_max = input_array.y_max
self.y_min = input_array.y_min
self.lon_min = input_array.lon_min
self.lon_max = input_array.lon_max
self.lat_min = input_array.lat_min
self.lat_max = input_array.lat_max
self.pix_area_sr = input_array.pix_area_sr
##################
# new attributes #
##################
if isinstance(input_array, SyntheticCube):
# attributes with are passed, since input_array is SyntheticCube
# physical values
self.unit_in = input_array.unit_in
self.unit_out = input_array.unit_out
self.grid_unit = input_array.grid_unit
self.grid_unit_name = input_array.grid_unit_name
# properties of cube
self.FOV = deepcopy(input_array.FOV)
# name
self.name = input_array.name
self.stage = input_array.stage
self.log = deepcopy(input_array.log)
# filter
self.filter = deepcopy(input_array.filter)
else: # attributes are defined, since input_array is NOT SyntheticCube
# physical values
self.unit_in = input_array.units
self.unit_out = unit_out
self.grid_unit = grid_units(self.x_max - self.x_min)['grid_unit']
self.grid_unit_name = grid_units(self.x_max - self.x_min)['grid_unit_name']
self.FOV = (self.x_max - self.x_min, self.y_max - self.y_min)
# name
self.name = name
self.stage = 'SyntheticCube: initial'
self.log = [self.stage]
# filter
self.filter = {'name': None, 'waf_0': None, 'waf_min': None, 'waf_max': None}
        # convert val units into unit_out
s = ConvertUnits(wav=self.wav, val=self.val)
self.val = s.get_unit(in_units=self.unit_in, out_units=self.unit_out, input_resolution=self.resolution['arcsec'])
self.units = self.unit_out
def extinction(self, A_v, input_opacities=None):
'''
Accounts for reddening.
Parameters
----------
        A_v : float
            Value of the visible extinction.
        input_opacities : ``None``, str
            If ``None`` the standard extinction law is used.
            Otherwise the path to an opacity file (e. g. input_opacities.txt)
            can be passed as a str; the file is read with column #1 wav in
            microns and column #2 in cm^2/g.
Default is ``None``.
Returns
-------
cube : SyntheticCube
'''
stage = 'SyntheticCube: extinction'
# read own extinction law
if input_opacities is None:
t = np.loadtxt(ROOT + 'database/extinction/extinction_law.txt')
else:
t = np.loadtxt(input_opacities)
wav_ext = t[:, 0]
k_lam = t[:, 1]
# wav_ext monotonically increasing
if wav_ext[0] > wav_ext[1]:
wav_ext = wav_ext[::-1]
k_lam = k_lam[::-1]
k_v = np.interp(0.550, wav_ext, k_lam)
# interpolate to get A_int for a certain wavelength
k = np.interp(self.wav, wav_ext, k_lam)
A_int_lam = A_v * (k / k_v)
# apply extinction law
val_ext = np.zeros(shape=np.shape(self.val))
val_ext[:,:,:len(self.wav)] = self.val[:,:,:len(self.wav)] * 10**(-0.4 * A_int_lam[:len(self.wav)])
# return SimulateCube
c = SyntheticCube(self)
c.val = val_ext
c.stage = stage
c.log.append(c.stage)
return c
def change_resolution(self, new_resolution, grid_plot=None):
'''
Changes the resolution of every slice of the val cube.
Parameters
----------
        new_resolution : float
            Resolution which the val array should get in ``arcsec/pixel``.
grid_plot : ``None``, ``True``
If ``True`` old and new resolution is visualized in a plot.
Default is ``None``.
Returns
-------
cube : SyntheticCube
'''
stage = 'SyntheticCube: change_resolution'
# debugging comment
logger.debug('-' * 70)
logger.debug(stage)
logger.debug('-' * 70)
logger.debug('total value before zoom : ' + str('%1.4e' % np.sum(self.val)) + ' ' + str(self.units))
# match resolution of psf and val slice
f = ConservingZoom(array=self.val, initial_resolution=self.resolution['arcsec'], new_resolution=new_resolution)
zoomed_val = f.zoom()
# average after changing resolution for MJy/sr
if self.units == 'MJy/sr' or self.units == 'Jy/arcsec^2':
# size of new pixel in units of old pixel
size = new_resolution ** 2 / self.resolution['arcsec'] ** 2
zoomed_val = zoomed_val / size
if grid_plot is not None:
f.zoom_grid(self.name)
# debugging comment
logger.debug('total value after zoom : ' + str('%1.4e' % np.sum(zoomed_val)) + ' ' + str(self.units))
# return SimulateCube
c = SyntheticCube(self)
c.val = zoomed_val
c.stage = stage
c.log.append(c.stage)
c.FOV = (f.len_nx / f.len_nrx * self.FOV[0], f.len_ny / f.len_nry * self.FOV[1])
return c
def central_pixel(self, dx, dy):
'''
Move array right and up to create a central pixel.
Returns
-------
cube : SyntheticCube
'''
stage = 'SyntheticCube: central_pixel'
ce = central(array=self.val, dx=dx, dy=dy)
len_x_old = float(self.pixel[0])
len_x_new = float(len(ce[:,0]))
len_y_old = float(self.pixel[1])
len_y_new = float(len(ce[0,:]))
old_FOV = self.FOV
new_FOV = (len_x_new / len_x_old * old_FOV[0], len_y_new / len_y_old * old_FOV[1])
# return SimulateCube
c = SyntheticCube(self)
c.val = ce
c.stage = stage
c.log.append(c.stage)
c.FOV = new_FOV
return c
def convolve_psf(self, psf):
'''
Convolves every slice of the val cube with a PSF of choice.
Parameters
----------
psf : GaussianPSF, FilePSF, database, FunctionPSF
* GaussianPSF(self, diameter): Convolves val with Gaussian PSF.
* FilePSF(self, psf_file, condensed): Reads PSF from input file.
* database: PSF object defined in FluxCompensator database.
* FunctionPSF(self, psf_function, width): Convolves val with calculated PSF.
Returns
-------
cube : SyntheticCube
'''
stage = 'SyntheticCube: convolve_PSF'
# debugging comments
if isinstance(psf, GaussianPSF):
logger.debug('-' * 70)
logger.debug(stage + 'with GaussianPSF')
logger.debug('-' * 70)
# convolve value with classes GaussianPSF, FilePSF and FunctionPSF
val = self.val.copy()
for i in range(len(self.wav)):
val[:, :, i] = psf.convolve(wav = self.wav[i], array = self.val[:,:, i], resolution = self.resolution)
# return SimulateCube
c = SyntheticCube(self)
c.val = val
c.stage = stage
c.log.append(c.stage)
return c
def convolve_filter(self, filter_input, plot_rebin=None, plot_rebin_dpi=None):
'''
Convolves slice within filter limits into a 2D image.
Parameters
----------
filter_input : object
* database : if filter ``name`` from FluxCompensator database is used.
* Filter : if own filter is used.
plot_rebin : ``True``, ``None``
Switch to plot the rebined filter and the original filter in one plot.
plot_rebin_dpi : ``None``, scalar > 0
The resolution in dots per inch.
``None`` is default and will use the value savefig.dpi
in the matplotlibrc file.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticCube: convolve_filter'
# debugging comment
logger.debug('-' * 70)
logger.debug(stage)
logger.debug('-' * 70)
weight = filter_input.rebin(self.wav, self.val)
        # returns weight = {'wav_short', 'val_short', 'Response_new', 'filter_index', 'waf_0', 'waf_min', 'waf_max', 'filter_name'}
wav_short = weight['wav_short']
val_short = weight['val_short']
filter_index = weight['filter_index']
Response_new = weight['Response_new']
waf_0 = weight['waf_0']
waf_min = weight['waf_min']
waf_max = weight['waf_max']
filter_name = weight['filter_name']
if plot_rebin is not None:
plot = filter_input.plot(val_name=self.name, dpi=plot_rebin_dpi)
# weight val_short with rebined response
val = val_short.copy()
val[:, :, :len(wav_short)] = val_short[:,:, :len(wav_short)] * Response_new[:len(wav_short)]
# collapse remaining cube into 2D
val_2D = np.sum(val, axis=2)
# return SyntheticImage
from .image import SyntheticImage
i = SyntheticImage(self)
i.log.append(stage)
i.stage = 'SyntheticImage: initial'
i.log.append(i.stage)
i.filter = {'name': filter_name, 'waf_0': waf_0, 'waf_min': waf_min, 'waf_max': waf_max}
i.val = val_2D
i.wav = np.array(waf_0)
return i
def add_noise(self, mu_noise, sigma_noise, seed=None, diagnostics=None):
'''
Adds normal distributed noise to every slice in the val cube
of SyntheticCube.
Parameters
----------
mu_noise : float
Mean of the normal distribution.
Good choice: mu_noise = 0.
sigma_noise : float
Standard deviation of the normal distribution. Good choice around:
* ``'ergs/cm^2/s'`` : sigma_noise = 10.**(-13)
* ``'ergs/cm^2/s/Hz'`` : sigma_noise = 10.**(-26)
* ``'Jy'`` : sigma_noise = 10.**(-3)
* ``'mJy'`` : sigma_noise = 10.**(-1)
* ``'MJy/sr'`` : sigma_noise = 10.**(-10)
seed : float, ``None``
When float seed fixes the random numbers to a certain
sequence in order to create reproducible results.
Default is ``None``.
diagnostics : truetype
When ``True`` noise array is stored in a fits file.
Returns
-------
cube : SyntheticCube
'''
stage = 'SyntheticCube: add_noise'
# add different noise with same mu and sigma to 3D cube
val = self.val.copy()
for i in range(len(self.wav)):
if sigma_noise != 0. and sigma_noise != 0:
if seed is not None:
np.random.seed(seed=seed)
noise = normal(mu_noise, sigma_noise, self.pixel)
if sigma_noise == 0. or sigma_noise == 0:
noise = np.zeros(self.pixel)
val[:, :, i] = self.val[:,:, i] + noise
if diagnostics is True:
fits.writeto(self.name + '_process-output_SC-noise.fits', noise, clobber=True)
# return SyntheticCube
c = SyntheticCube(self)
c.val = val
c.stage = stage
c.log.append(c.stage)
return c
def get_rough_sed(self):
'''
Collapses the current val cube into 1D array (SED).
Returns
-------
sed : SyntheticSED
'''
stage = 'SyntheticCube: get_rough_sed'
# for MJy/sr convert first, add and then convert back
if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
s = ConvertUnits(wav=self.wav, val=self.val)
self.val = s.get_unit(in_units=self.units, out_units='Jy', input_resolution=self.resolution['arcsec'])
# collapse every slice to one scalar value
rough_sed = np.sum(np.sum(self.val.copy(), axis=1), axis=0)
if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
s = ConvertUnits(wav=self.wav, val=rough_sed)
rough_sed = s.get_unit(in_units='Jy', out_units=self.unit_out, input_resolution=self.resolution['arcsec'] * self.pixel[0])
# return SyntheticSED
from .sed import SyntheticSED
s = SyntheticSED(self)
s.log.append(stage)
s.stage = 'SyntheticSED: initial'
s.log.append(s.stage)
s.val = rough_sed
return s
def get_total_val(self, wav_1, wav_2):
'''
Collapses the val of SyntheticCube within the boundaries wav_1
and wav_2 into a 0D value val.
WARNING: This tool cannot replace convolve_filter!
But it can be used to produce rough estimates
in-between the processes.
Parameters
----------
wav_1, wav_2 : float
Boundaries in microns.
Returns
-------
val : SyntheticFlux
'''
stage = 'SyntheticCube: get_total_val'
        # slices within the boundaries are extracted, average-collapsed to a 2D image and finally collapsed to a single scalar value
# for MJy/sr convert first, add and then convert back
if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
s = ConvertUnits(wav=self.wav, val=self.val)
val = s.get_unit(in_units=self.units, out_units='Jy', input_resolution=self.resolution['arcsec'])
else: val = self.val
c = get_slices(wav=self.wav, val=val, wav_1=wav_1, wav_2=wav_2)
i = average_collapse(val=c['val_short'])
f_total = np.sum(i)
# real limits within collaps
wav_max = 10 ** (np.log10(self.wav[c['filter_index'][0]]) + self.spacing_wav / 2.)
wav_min = 10 ** (np.log10(self.wav[c['filter_index'][-1]]) - self.spacing_wav / 2.)
wav_total = central_wav(wav=[wav_min, wav_max])
if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
s = ConvertUnits(wav=wav_total, val=f_total)
f_total = s.get_unit(in_units='Jy', out_units=self.unit_out, input_resolution=self.resolution['arcsec'] * self.pixel[0])
# return SyntheticFlux
from .flux import SyntheticFlux
f = SyntheticFlux(self)
f.log.append(stage)
f.stage = 'SyntheticFlux: initial'
f.log.append(f.stage)
f.wav = np.array(wav_total)
f.val = np.array(f_total)
f.filter = {'name': 'val_tot', 'waf_0': wav_total, 'waf_min': wav_min, 'waf_max': wav_max}
return f
def plot_image(self, wav_interest, prefix=None, name=None, multi_cut=None, single_cut=None, set_cut=None, dpi=None):
'''
Plots a certain slice close the wav_interest.
The wavelength interval of the chosen slice labels the plot.
Parameters
----------
wav_interest : float, ``None``
* float : wavelength close to slice in microns.
* ``None`` : Only if input_array is SyntheticImage like
prefix : str
Name of the image. Default naming chain is switched off.
name : str
Name of image within the default naming chain to distinguish the
plot files. E. g. 'PSF_gaussian'
        multi_cut : ``True``, ``None``
* ``True`` : plots chosen image slice at cuts of [100, 99, 95, 90]%.
* ``None`` : no mulit-plot is returned.
Default is ``None``.
single_cut : float, ``None``
* float : cut level for single plot of image slice between 0 and 100.
* ``None`` : no single plot is returned.
set_cut : tuple, ``None``
* tuple : set_cut(v_min, v_max)
Minimal and maximal physical value of val in the colorbars.
* ``None`` : no plot with minimal and maximal cut is returned.
Default is ``None``.
dpi : ``None``, scalar > 0
The resolution in dots per inch.
            ``None`` is default and will use the value savefig.dpi
in the matplotlibrc file.
Returns
-------
cube : SyntheticCube
'''
stage = 'SyntheticCube: plot_image'
if prefix is None and name is None:
            raise Exception('If prefix name is not given, you need to give a name to enable the default naming chain.')
if prefix is not None:
if multi_cut is True and (single_cut is not None or set_cut is not None):
raise Exception('If prefix naming is enabled only one plotting option can be chosen.')
elif multi_cut is None and (single_cut is not None and set_cut is not None):
raise Exception('If prefix naming is enabled only one plotting option can be chosen.')
plot = MakePlots(prefix=prefix, name=name, input_array=SyntheticCube(self), wav_interest=wav_interest, multi_cut=multi_cut, single_cut=single_cut, set_cut=set_cut, dpi=dpi)
# return SyntheticCube
c = SyntheticCube(self)
c.stage = stage
c.log.append(c.stage)
return c
@property
def spacing_wav(self):
'''
        The property spacing_wav estimates the width of the logarithmically
        spaced wav entries.
'''
if self.wav.ndim != 0:
spacing_wav = np.log10(self.wav[0] / self.wav[-1]) / (len(self.wav) - 1)
else:
spacing_wav = None
return spacing_wav
@property
def pixel(self):
'''
The property pixel is a tuple which resembles the current pixel in a
        value val. The entries of ``pixel(x,y)`` are accessed as follows:
``x = pixel[0]``
``y = pixel[1]``
'''
if self.val.ndim in (0, 1):
pixel = (None, None)
if self.val.ndim in (2, 3):
pixel = (self.val.shape[0], self.val.shape[1])
return pixel
@property
def shape(self):
'''
The property shape is a string, which resembles the current shape of
the value val.
scalar: ``'()'``
1D: ``'(wav)'``
2D: ``'(x, y)'``
3D: ``'(x, y , wav)'``
'''
if self.val.ndim == 0:
shape = '()'
if self.val.ndim == 1:
shape = '(wav)'
if self.val.ndim == 2:
shape = '(x, y)'
if self.val.ndim == 3:
shape = '(x, y, wav)'
return shape
@property
def resolution(self):
'''
The property resolution tells you the current resolution. If we are already
in the SED or flux everything is considered as one large pixel.
resolution in arcsec per pixel : ``resolution['arcsec']``
resolution in rad per pixel : ``resolution['rad']``
'''
resolution = {}
if self.pixel[0] is None:
resolution['rad'] = self.FOV[0] / 1. / self.distance
else:
resolution['rad'] = self.FOV[0] / self.pixel[0] / self.distance
resolution['arcsec'] = np.degrees(resolution['rad']) * 3600
return resolution
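# Illustrative end-to-end sketch (not part of the original module), assuming
# a HYPERION ModelOutput image as input_array and a filter object from the
# FluxCompensator database; 'model.rtout' and some_filter are placeholders:
#
#     from hyperion.model import ModelOutput
#
#     m = ModelOutput('model.rtout')
#     array = m.get_image(group=0, inclination=0, distance=3.e21,
#                         units='ergs/cm^2/s')
#     cube = SyntheticCube(input_array=array, unit_out='ergs/cm^2/s',
#                          name='example')
#     reddened = cube.extinction(A_v=20.)
#     zoomed = reddened.change_resolution(new_resolution=2.)   # arcsec/pixel
#     noisy = zoomed.add_noise(mu_noise=0., sigma_noise=1.e-13)
#     image = noisy.convolve_filter(filter_input=some_filter)  # SyntheticImage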
| bsd-2-clause | -1,980,972,553,126,057,000 | 31.483204 | 180 | 0.532655 | false |
CodeParrot 🦜 Dataset Cleaned (valid)
Valid split of CodeParrot 🦜 Dataset Cleaned (the data is exposed under a single train split key).
Dataset structure
DatasetDict({
train: Dataset({
features: ['repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated'],
num_rows: 61373
})
})
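To load the split with the Hugging Face datasets library (a minimal sketch; the repository id below is assumed from the page title):

from datasets import load_dataset

ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
print(ds[0]["repo_name"], ds[0]["path"], ds[0]["license"])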