repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
meteorcloudy/tensorflow | tensorflow/python/kernel_tests/io_ops_test.py | 23 | 4898 | # -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class IoOpsTest(test.TestCase):
def testReadFile(self):
cases = ['', 'Some contents', 'Неки садржаји на српском']
for contents in cases:
contents = compat.as_bytes(contents)
with tempfile.NamedTemporaryFile(
prefix='ReadFileTest', dir=self.get_temp_dir(), delete=False) as temp:
temp.write(contents)
with self.test_session():
read = io_ops.read_file(temp.name)
self.assertEqual([], read.get_shape())
self.assertEqual(read.eval(), contents)
os.remove(temp.name)
def testWriteFile(self):
cases = ['', 'Some contents']
for contents in cases:
contents = compat.as_bytes(contents)
with tempfile.NamedTemporaryFile(
prefix='WriteFileTest', dir=self.get_temp_dir(),
delete=False) as temp:
pass
with self.test_session() as sess:
w = io_ops.write_file(temp.name, contents)
sess.run(w)
with open(temp.name, 'rb') as f:
file_contents = f.read()
self.assertEqual(file_contents, contents)
os.remove(temp.name)
def testWriteFileCreateDir(self):
cases = ['', 'Some contents']
for contents in cases:
contents = compat.as_bytes(contents)
subdir = os.path.join(self.get_temp_dir(), 'subdir1')
filepath = os.path.join(subdir, 'subdir2', 'filename')
with self.test_session() as sess:
w = io_ops.write_file(filepath, contents)
sess.run(w)
with open(filepath, 'rb') as f:
file_contents = f.read()
self.assertEqual(file_contents, contents)
shutil.rmtree(subdir)
def _subset(self, files, indices):
return set(
compat.as_bytes(files[i].name) for i in range(len(files))
if i in indices)
def testMatchingFiles(self):
cases = [
'ABcDEF.GH', 'ABzDEF.GH', 'ABasdfjklDEF.GH', 'AB3DEF.GH', 'AB4DEF.GH',
'ABDEF.GH', 'XYZ'
]
files = [
tempfile.NamedTemporaryFile(
prefix=c, dir=self.get_temp_dir(), delete=True) for c in cases
]
with self.test_session():
# Test exact match without wildcards.
for f in files:
self.assertEqual(
io_ops.matching_files(f.name).eval(), compat.as_bytes(f.name))
# We will look for files matching "ABxDEF.GH*" where "x" is some wildcard.
directory_path = files[0].name[:files[0].name.find(cases[0])]
pattern = directory_path + 'AB%sDEF.GH*'
self.assertEqual(
set(io_ops.matching_files(pattern % 'z').eval()),
self._subset(files, [1]))
self.assertEqual(
set(io_ops.matching_files(pattern % '?').eval()),
self._subset(files, [0, 1, 3, 4]))
self.assertEqual(
set(io_ops.matching_files(pattern % '*').eval()),
self._subset(files, [0, 1, 2, 3, 4, 5]))
# NOTE(mrry): Windows uses PathMatchSpec to match file patterns, which
# does not support the following expressions.
if os.name != 'nt':
self.assertEqual(
set(io_ops.matching_files(pattern % '[cxz]').eval()),
self._subset(files, [0, 1]))
self.assertEqual(
set(io_ops.matching_files(pattern % '[0-9]').eval()),
self._subset(files, [3, 4]))
# Test an empty list input.
self.assertItemsEqual(io_ops.matching_files([]).eval(), [])
# Test multiple exact filenames.
self.assertItemsEqual(
io_ops.matching_files([
files[0].name, files[1].name, files[2].name]).eval(),
self._subset(files, [0, 1, 2]))
# Test multiple globs.
self.assertItemsEqual(
io_ops.matching_files([
pattern % '?', directory_path + 'X?Z*']).eval(),
self._subset(files, [0, 1, 3, 4, 6]))
for f in files:
f.close()
if __name__ == '__main__':
test.main()
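# Hedged usage sketch (not part of the test suite): the ops exercised
# above can be driven directly with the TF 1.x session API, e.g.
#
#     import tensorflow as tf
#     from tensorflow.python.ops import io_ops
#     with tf.Session() as sess:
#         sess.run(io_ops.write_file('/tmp/demo.txt', b'hello'))
#         print(sess.run(io_ops.read_file('/tmp/demo.txt')))  # b'hello'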
| apache-2.0 |
yd0str/infernal-twin | build/pip/build/lib.linux-i686-2.7/pip/_vendor/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
def __init__(self, model, reversed=False, nameProber=None):
CharSetProber.__init__(self)
self._mModel = model
# TRUE if we need to reverse every pair in the model lookup
self._mReversed = reversed
# Optional auxiliary prober for name decision
self._mNameProber = nameProber
self.reset()
def reset(self):
CharSetProber.reset(self)
# char order of last character
self._mLastOrder = 255
self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
self._mTotalSeqs = 0
self._mTotalChar = 0
# characters that fall in our sampling range
self._mFreqChar = 0
def get_charset_name(self):
if self._mNameProber:
return self._mNameProber.get_charset_name()
else:
return self._mModel['charsetName']
def feed(self, aBuf):
if not self._mModel['keepEnglishLetter']:
aBuf = self.filter_without_english_letters(aBuf)
aLen = len(aBuf)
if not aLen:
return self.get_state()
for c in aBuf:
order = self._mModel['charToOrderMap'][wrap_ord(c)]
if order < SYMBOL_CAT_ORDER:
self._mTotalChar += 1
if order < SAMPLE_SIZE:
self._mFreqChar += 1
if self._mLastOrder < SAMPLE_SIZE:
self._mTotalSeqs += 1
if not self._mReversed:
i = (self._mLastOrder * SAMPLE_SIZE) + order
model = self._mModel['precedenceMatrix'][i]
else: # reverse the order of the letters in the lookup
i = (order * SAMPLE_SIZE) + self._mLastOrder
model = self._mModel['precedenceMatrix'][i]
self._mSeqCounters[model] += 1
self._mLastOrder = order
if self.get_state() == constants.eDetecting:
if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
cf = self.get_confidence()
if cf > POSITIVE_SHORTCUT_THRESHOLD:
if constants._debug:
sys.stderr.write('%s confidence = %s, we have a '
'winner\n' %
(self._mModel['charsetName'], cf))
self._mState = constants.eFoundIt
elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
if constants._debug:
sys.stderr.write('%s confidence = %s, below negative '
'shortcut threshold %s\n' %
(self._mModel['charsetName'], cf,
NEGATIVE_SHORTCUT_THRESHOLD))
self._mState = constants.eNotMe
return self.get_state()
def get_confidence(self):
r = 0.01
if self._mTotalSeqs > 0:
r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
/ self._mModel['mTypicalPositiveRatio'])
r = r * self._mFreqChar / self._mTotalChar
if r >= 1.0:
r = 0.99
return r
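# Illustrative sketch (not part of chardet): get_confidence() above is the
# share of observed positive bigrams among all bigrams, normalised by the
# model's typical positive ratio and scaled by the fraction of frequent
# characters. The standalone helper below restates that formula; its name
# and the guard against empty input are additions for clarity.
def _confidence_sketch(positive_seqs, total_seqs, typical_positive_ratio,
                       freq_chars, total_chars):
    if total_seqs <= 0 or total_chars <= 0:
        return 0.01  # the prober's floor confidence
    r = (1.0 * positive_seqs / total_seqs) / typical_positive_ratio
    r = r * freq_chars / total_chars
    if r >= 1.0:
        r = 0.99  # never report full certainty
    return r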
| gpl-3.0 |
richardcs/ansible | lib/ansible/modules/network/f5/bigip_software_update.py | 11 | 9756 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_software_update
short_description: Manage the software update settings of a BIG-IP
description:
- Manage the software update settings of a BIG-IP.
version_added: 2.5
options:
auto_check:
description:
- Specifies whether to automatically check for updates on the F5
Networks downloads server.
type: bool
auto_phone_home:
description:
- Specifies whether to automatically send phone home data to the
F5 Networks PhoneHome server.
type: bool
frequency:
description:
- Specifies the schedule for the automatic update check.
choices:
- daily
- monthly
- weekly
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Enable automatic update checking
bigip_software_update:
auto_check: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Disable automatic update checking and phoning home
bigip_software_update:
auto_check: no
auto_phone_home: no
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
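# Hedged sketch (not in the original examples): the check schedule is
# controlled with the frequency option, which accepts daily, monthly
# or weekly.
- name: Check for updates weekly
  bigip_software_update:
    auto_check: yes
    frequency: weekly
    provider:
      password: secret
      server: lb.mydomain.com
      user: admin
  delegate_to: localhost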
'''
RETURN = r'''
auto_check:
description: Whether the system checks for updates automatically.
returned: changed
type: bool
sample: True
auto_phone_home:
description: Whether the system automatically sends phone home data.
returned: changed
type: bool
sample: True
frequency:
description: The frequency of automatic update checks.
returned: changed
type: string
sample: weekly
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
class Parameters(AnsibleF5Parameters):
api_map = {
'autoCheck': 'auto_check',
'autoPhonehome': 'auto_phone_home'
}
api_attributes = [
'autoCheck', 'autoPhonehome', 'frequency',
]
updatables = [
'auto_check', 'auto_phone_home', 'frequency',
]
returnables = [
'auto_check', 'auto_phone_home', 'frequency',
]
class ApiParameters(Parameters):
@property
def auto_check(self):
if self._values['auto_check'] is None:
return None
return self._values['auto_check']
class ModuleParameters(Parameters):
@property
def auto_check(self):
if self._values['auto_check'] is None:
return None
elif self._values['auto_check'] is True:
return 'enabled'
else:
return 'disabled'
@property
def auto_phone_home(self):
if self._values['auto_phone_home'] is None:
return None
elif self._values['auto_phone_home'] is True:
return 'enabled'
else:
return 'disabled'
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def auto_check(self):
if self._values['auto_check'] == 'enabled':
return True
elif self._values['auto_check'] == 'disabled':
return False
@property
def auto_phone_home(self):
if self._values['auto_phone_home'] == 'enabled':
return True
elif self._values['auto_phone_home'] == 'disabled':
return False
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def exec_module(self): # lgtm [py/similar-function]
result = dict()
changed = self.update()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/software/update/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/software/update/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
auto_check=dict(
type='bool'
),
auto_phone_home=dict(
type='bool'
),
frequency=dict(
choices=['daily', 'monthly', 'weekly']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
joshuahoman/vivisect | vstruct/qt/__init__.py | 6 | 5522 | '''
Some utils for Qt code which uses vstruct...
'''
import vqt.tree as vq_tree
from PyQt4 import QtCore, QtGui
from vqt.main import idlethread, idlethreadsync
class VQStructNamespacesView(vq_tree.VQTreeView):
def __init__(self, parent=None):
vq_tree.VQTreeView.__init__(self, parent=parent)
#model = vq_tree.VTreeView(parent=self, columns=('Namespace', 'Structure'))
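# NOTE: this stub is shadowed by the fuller VQStructNamespacesView
# defined further down in this module; Python keeps the later binding.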
class VQStructSelectView(vq_tree.VQTreeView):
def __init__(self, vsbuilder, parent=None):
vq_tree.VQTreeView.__init__(self, parent=parent)
self.vsbuilder = vsbuilder
model = vq_tree.VQTreeModel(parent=self, columns=('Namespace', 'Structure'))
for nsname in vsbuilder.getVStructNamespaceNames():
pnode = model.append((nsname, ''))
pnode.structname = None
for sname in vsbuilder.getVStructNames(namespace=nsname):
spnode = model.append(('', sname), parent=pnode)
spnode.structname = '%s.%s' % (nsname, sname)
for sname in vsbuilder.getVStructNames():
node = model.append( ('', sname ) )
node.structname = sname
self.setModel(model)
class VQStructSelectDialog(QtGui.QDialog):
def __init__(self, vsbuilder, parent=None):
QtGui.QDialog.__init__(self, parent=parent)
self.structname = None
self.setWindowTitle('Select a structure...')
vlyt = QtGui.QVBoxLayout()
hlyt = QtGui.QHBoxLayout()
self.structtree = VQStructSelectView(vsbuilder, parent=self)
hbox = QtGui.QWidget(parent=self)
ok = QtGui.QPushButton("Ok", parent=hbox)
cancel = QtGui.QPushButton("Cancel", parent=hbox)
self.structtree.doubleClicked.connect( self.dialog_activated )
ok.clicked.connect(self.dialog_ok)
cancel.clicked.connect(self.dialog_cancel)
hlyt.addStretch(1)
hlyt.addWidget(cancel)
hlyt.addWidget(ok)
hbox.setLayout(hlyt)
vlyt.addWidget(self.structtree)
vlyt.addWidget(hbox)
self.setLayout(vlyt)
self.resize(500, 500)
def dialog_activated(self, idx):
if idx.isValid():
pnode = idx.internalPointer()
self.structname = pnode.structname
self.accept()
def dialog_ok(self):
for idx in self.structtree.selectedIndexes():
pnode = idx.internalPointer()
self.structname = pnode.structname
self.accept()
def dialog_cancel(self):
self.reject()
@idlethreadsync
def selectStructure(vsbuilder, parent=None):
d = VQStructSelectDialog(vsbuilder, parent=parent)
r = d.exec_()
return d.structname
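# Hedged usage sketch: selectStructure() blocks on the dialog and returns
# the dotted structure name, or None on cancel. The builder import path
# below is an assumption, not confirmed by this module.
#
#     from vstruct.builder import VStructBuilder  # hypothetical path
#     vsb = VStructBuilder()
#     name = selectStructure(vsb, parent=mainwindow)
#     if name is not None:
#         print('user picked %s' % name)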
class VQStructNamespacesView(vq_tree.VQTreeView):
def __init__(self, parent=None):
vq_tree.VQTreeView.__init__(self, parent=parent)
model = vq_tree.VQTreeModel(parent=self, columns=('Subsystem', 'Module Name'))
win = model.append(('windows', ''))
xp_i386_user = model.append(('Windows XP i386 Userland', ''), parent=win)
xp_i386_ntdll = model.append(('','ntdll'), parent=xp_i386_user)
xp_i386_ntdll.modinfo = ('ntdll','vstruct.defs.windows.win_5_1_i386.ntdll')
xp_i386_kern = model.append(('Windows XP i386 Kernel', ''), parent=win)
xp_i386_nt = model.append(('','nt'), parent=xp_i386_kern)
xp_i386_nt.modinfo = ('nt','vstruct.defs.windows.win_5_1_i386.ntoskrnl')
xp_i386_win32k = model.append(('','win32k'), parent=xp_i386_kern)
xp_i386_win32k.modinfo = ('win32k','vstruct.defs.windows.win_5_1_i386.win32k')
win7_amd64_user = model.append(('Windows 7 amd64 Userland', ''), parent=win)
win7_amd64_ntdll = model.append(('','ntdll'), parent=win7_amd64_user)
win7_amd64_ntdll.modinfo = ('ntdll','vstruct.defs.windows.win_6_1_amd64.ntdll')
pos = model.append(('posix',''))
pos_elf = model.append(('', 'Elf'), parent=pos)
pos_elf.modinfo = ('elf', 'vstruct.defs.elf')
self.setModel(model)
class VQStructNamespaceDialog(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QDialog.__init__(self, parent=parent)
self.modinfo = None
self.setWindowTitle('Select a module...')
vlyt = QtGui.QVBoxLayout()
hlyt = QtGui.QHBoxLayout()
self.structtree = VQStructNamespacesView(parent=self)
hbox = QtGui.QWidget(parent=self)
ok = QtGui.QPushButton("Ok", parent=hbox)
cancel = QtGui.QPushButton("Cancel", parent=hbox)
self.structtree.doubleClicked.connect( self.dialog_activated )
ok.clicked.connect(self.dialog_ok)
cancel.clicked.connect(self.dialog_cancel)
hlyt.addStretch(1)
hlyt.addWidget(cancel)
hlyt.addWidget(ok)
hbox.setLayout(hlyt)
vlyt.addWidget(self.structtree)
vlyt.addWidget(hbox)
self.setLayout(vlyt)
self.resize(500, 500)
def dialog_activated(self, idx):
if idx.isValid():
pnode = idx.internalPointer()
self.modinfo = getattr(pnode, 'modinfo', None)
self.accept()
def dialog_ok(self):
for idx in self.structtree.selectedIndexes():
pnode = idx.internalPointer()
self.modinfo = getattr(pnode, 'modinfo', None)
self.accept()
def dialog_cancel(self):
self.reject()
@idlethreadsync
def selectStructNamespace(parent=None):
d = VQStructNamespaceDialog(parent=parent)
r = d.exec_()
return d.modinfo
| apache-2.0 |
infrascloudy/flask-base | app/models/user.py | 1 | 6385 | from flask import current_app
from flask_login import AnonymousUserMixin, UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import BadSignature, SignatureExpired
from werkzeug.security import check_password_hash, generate_password_hash
from app import db, login_manager
class Permission:
GENERAL = 0x01
ADMINISTER = 0xff
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
index = db.Column(db.String(64))
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.GENERAL, 'main', True),
'Administrator': (
Permission.ADMINISTER,
'admin',
False # grants all permissions
)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.index = roles[r][1]
role.default = roles[r][2]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role \'%s\'>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
confirmed = db.Column(db.Boolean, default=False)
first_name = db.Column(db.String(64), index=True)
last_name = db.Column(db.String(64), index=True)
email = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(128))
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['ADMIN_EMAIL']:
self.role = Role.query.filter_by(
permissions=Permission.ADMINISTER).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_admin(self):
return self.can(Permission.ADMINISTER)
@property
def password(self):
raise AttributeError('`password` is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=604800):
"""Generate a confirmation token to email a new user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def generate_email_change_token(self, new_email, expiration=3600):
"""Generate an email change token to email an existing user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def generate_password_reset_token(self, expiration=3600):
"""
Generate a password reset change token to email to an existing user.
"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def confirm_account(self, token):
"""Verify that the provided token is for this user's id."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
db.session.commit()
return True
def change_email(self, token):
"""Verify the new email for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
db.session.add(self)
db.session.commit()
return True
def reset_password(self, token, new_password):
"""Verify the new password for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
db.session.commit()
return True
@staticmethod
def generate_fake(count=100, **kwargs):
"""Generate a number of fake users for testing."""
from sqlalchemy.exc import IntegrityError
from random import seed, choice
from faker import Faker
fake = Faker()
roles = Role.query.all()
seed()
for i in range(count):
u = User(
first_name=fake.first_name(),
last_name=fake.last_name(),
email=fake.email(),
password=fake.password(),
confirmed=True,
role=choice(roles),
**kwargs)
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __repr__(self):
return '<User \'%s\'>' % self.full_name()
class AnonymousUser(AnonymousUserMixin):
def can(self, _):
return False
def is_admin(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
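# Hedged usage sketch (assumes an app context, a configured SECRET_KEY and
# a database session; values are illustrative):
#
#     user = User(email='a@example.com', password='hunter2',
#                 first_name='Ada', last_name='Byron')
#     db.session.add(user)
#     db.session.commit()
#     token = user.generate_confirmation_token()
#     assert user.confirm_account(token)  # sets user.confirmed = True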
| mit |
joernhees/git-hg-remote-bug_gae-init | main/lib/werkzeug/contrib/atom.py | 7 | 15329 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
from werkzeug._compat import implements_to_string, string_types
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
@implements_to_string
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be an URI. If
not present the `feed_url` is used, but one of both is
required.
:param updated: the time the feed was modified the last time. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
One of ``'text'``, ``'html'``
or ``'xhtml'``. Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
self.author = ({'name': 'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url, True)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url, True)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url, True))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version, True))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __str__(self):
return self.to_string()
@implements_to_string
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be an URI. If
not present the URL is used, but one of both is required.
:param updated: the time the entry was modified the last time. Must
be a :class:`datetime.datetime` object. Required.
:param author: the author of the entry. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if the feed does not have an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param categories: categories for the entry. Must be a list of dictionaries
with term (required), scheme and label (all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author', ())
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.categories = kwargs.get('categories', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base, True)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
for category in self.categories:
yield u' <category %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(category[k], True)) for k in category)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __str__(self):
return self.to_string()
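# Hedged usage sketch: because AtomFeed implements __call__, a built feed
# can be served directly as a WSGI application (values are illustrative):
#
#     from datetime import datetime
#     feed = AtomFeed('Example', feed_url='http://example.com/feed',
#                     url='http://example.com/')
#     feed.add('Hello', 'body text', content_type='html', author='me',
#              url='http://example.com/1', id='1',
#              updated=datetime.utcnow())
#     application = feed  # hand this to any WSGI server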
| mit |
75651/kbengine_cloud | kbe/src/lib/python/Lib/hmac.py | 142 | 5063 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from _operator import _compare_digest as compare_digest
import hashlib as _hashlib
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
"""RFC 2104 HMAC class. Also complies with RFC 4231.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object. *OR*
A hash name suitable for hashlib.new().
Defaults to hashlib.md5.
Implicit default to hashlib.md5 is deprecated and will be
removed in Python 3.6.
Note: key and msg must be a bytes or bytearray objects.
"""
if not isinstance(key, (bytes, bytearray)):
raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)
if digestmod is None:
_warnings.warn("HMAC() without an explicit digestmod argument "
"is deprecated.", PendingDeprecationWarning, 2)
digestmod = _hashlib.md5
if callable(digestmod):
self.digest_cons = digestmod
elif isinstance(digestmod, str):
self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
else:
self.digest_cons = lambda d=b'': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
if hasattr(self.inner, 'block_size'):
blocksize = self.inner.block_size
if blocksize < 16:
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
else:
_warnings.warn('No block_size attribute on given digest object; '
'Assuming %d.' % (self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
# self.blocksize is the default blocksize. self.block_size is
# effective block size as well as the public API attribute.
self.block_size = blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key + bytes(blocksize - len(key))
self.outer.update(key.translate(trans_5C))
self.inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
@property
def name(self):
return "hmac-" + self.inner.name
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
# Call __new__ directly to avoid the expensive __init__.
other = self.__class__.__new__(self.__class__)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def _current(self):
"""Return a hash object for the current state.
To be used only internally with digest() and hexdigest().
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
h = self._current()
return h.hexdigest()
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
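# Hedged usage sketch: compute a MAC, then verify it with the timing-safe
# compare_digest re-exported above (key and message are illustrative):
#
#     import hmac
#     mac = hmac.new(b'secret-key', b'message', digestmod='sha256')
#     expected = mac.digest()
#     ok = hmac.compare_digest(
#         expected,
#         hmac.new(b'secret-key', b'message', digestmod='sha256').digest())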
| lgpl-3.0 |
edwardzhou1980/bite-project | deps/mrtaskman/server/util/model_to_dict.py | 16 | 1536 | """Converts AppEngine db.Model instances to JSON."""
from google.appengine.ext import db
from google.appengine.ext.blobstore import blobstore
import datetime
import json
import logging
import time
from util import db_properties
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
def ModelToDict(model):
"""Returns dictionary from given db.Model."""
if not isinstance(model, db.Model):
logging.error('%s is not an instance of db.Model. It is %s',
model, model.__class__)
assert isinstance(model, db.Model)
output = {}
output['id'] = model.key().id_or_name()
for key, prop in model.properties().iteritems():
value = getattr(model, key)
if value is None:
output[key] = value
elif isinstance(prop, db_properties.JsonProperty):
output[key] = json.loads(value)
elif isinstance(value, SIMPLE_TYPES):
output[key] = value
elif isinstance(value, datetime.date):
# Convert date/datetime to ms-since-epoch ("new Date()").
ms = time.mktime(value.utctimetuple()) * 1000
ms += getattr(value, 'microseconds', 0) / 1000
output[key] = int(ms)
elif isinstance(value, db.GeoPt):
output[key] = {'lat': value.lat, 'lon': value.lon}
elif isinstance(prop, blobstore.BlobReferenceProperty):
# TODO: Implement this if it's needed.
output[key] = 'UnimplementedBlobRef'
elif isinstance(value, db.Model):
output[key] = ModelToDict(value)
else:
raise ValueError('cannot encode ' + repr(prop))
return output
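# Hedged usage sketch (the model class is hypothetical; requires the GAE
# datastore environment):
#
#     class Task(db.Model):
#         name = db.StringProperty()
#         created = db.DateTimeProperty(auto_now_add=True)
#
#     task = Task(name='demo')
#     task.put()  # entity needs a key before ModelToDict reads its id
#     ModelToDict(task)
#     # -> {'id': ..., 'name': 'demo', 'created': <ms since epoch>}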
| apache-2.0 |
georgewhewell/CouchPotatoServer | libs/xmpp/auth.py | 196 | 15633 | ## auth.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: auth.py,v 1.41 2008/09/13 21:45:21 normanr Exp $
"""
Provides library with all Non-SASL and SASL authentication mechanisms.
Can be used both for client and transport authentication.
"""
from protocol import *
from client import PlugIn
import sha,base64,random,dispatcher,re
import md5
def HH(some): return md5.new(some).hexdigest()
def H(some): return md5.new(some).digest()
def C(some): return ':'.join(some)
class NonSASL(PlugIn):
""" Implements old Non-SASL (JEP-0078) authentication used in jabberd1.4 and transport authentication."""
def __init__(self,user,password,resource):
""" Caches username, password and resource for auth. """
PlugIn.__init__(self)
self.DBG_LINE='gen_auth'
self.user=user
self.password=password
self.resource=resource
def plugin(self,owner):
""" Determine the best auth method (digest/0k/plain) and use it for auth.
Returns used method name on success. Used internally. """
if not self.resource: return self.authComponent(owner)
self.DEBUG('Querying server about possible auth methods','start')
resp=owner.Dispatcher.SendAndWaitForResponse(Iq('get',NS_AUTH,payload=[Node('username',payload=[self.user])]))
if not isResultNode(resp):
self.DEBUG('No result node arrived! Aborting...','error')
return
iq=Iq(typ='set',node=resp)
query=iq.getTag('query')
query.setTagData('username',self.user)
query.setTagData('resource',self.resource)
if query.getTag('digest'):
self.DEBUG("Performing digest authentication",'ok')
query.setTagData('digest',sha.new(owner.Dispatcher.Stream._document_attrs['id']+self.password).hexdigest())
if query.getTag('password'): query.delChild('password')
method='digest'
elif query.getTag('token'):
token=query.getTagData('token')
seq=query.getTagData('sequence')
self.DEBUG("Performing zero-k authentication",'ok')
hash = sha.new(sha.new(self.password).hexdigest()+token).hexdigest()
for foo in xrange(int(seq)): hash = sha.new(hash).hexdigest()
query.setTagData('hash',hash)
method='0k'
else:
self.DEBUG("Sequre methods unsupported, performing plain text authentication",'warn')
query.setTagData('password',self.password)
method='plain'
resp=owner.Dispatcher.SendAndWaitForResponse(iq)
if isResultNode(resp):
self.DEBUG('Successfully authenticated with remote host.','ok')
owner.User=self.user
owner.Resource=self.resource
owner._registered_name=owner.User+'@'+owner.Server+'/'+owner.Resource
return method
self.DEBUG('Authentication failed!','error')
def authComponent(self,owner):
""" Authenticate component. Send handshake stanza and wait for result. Returns "ok" on success. """
self.handshake=0
owner.send(Node(NS_COMPONENT_ACCEPT+' handshake',payload=[sha.new(owner.Dispatcher.Stream._document_attrs['id']+self.password).hexdigest()]))
owner.RegisterHandler('handshake',self.handshakeHandler,xmlns=NS_COMPONENT_ACCEPT)
while not self.handshake:
self.DEBUG("waiting on handshake",'notify')
owner.Process(1)
owner._registered_name=self.user
if self.handshake+1: return 'ok'
def handshakeHandler(self,disp,stanza):
""" Handler for registering in dispatcher for accepting transport authentication. """
if stanza.getName()=='handshake': self.handshake=1
else: self.handshake=-1
class SASL(PlugIn):
""" Implements SASL authentication. """
def __init__(self,username,password):
PlugIn.__init__(self)
self.username=username
self.password=password
def plugin(self,owner):
if not self._owner.Dispatcher.Stream._document_attrs.has_key('version'): self.startsasl='not-supported'
elif self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else: self.startsasl=None
def auth(self):
""" Start authentication. Result can be obtained via "SASL.startsasl" attribute and will be
either "success" or "failure". Note that successfull auth will take at least
two Dispatcher.Process() calls. """
if self.startsasl: pass
elif self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else: self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def plugout(self):
""" Remove SASL handlers from owner's dispatcher. Used internally. """
if self._owner.__dict__.has_key('features'): self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
if self._owner.__dict__.has_key('challenge'): self._owner.UnregisterHandler('challenge',self.SASLHandler,xmlns=NS_SASL)
if self._owner.__dict__.has_key('failure'): self._owner.UnregisterHandler('failure',self.SASLHandler,xmlns=NS_SASL)
if self._owner.__dict__.has_key('success'): self._owner.UnregisterHandler('success',self.SASLHandler,xmlns=NS_SASL)
def FeaturesHandler(self,conn,feats):
""" Used to determine if server supports SASL auth. Used internally. """
if not feats.getTag('mechanisms',namespace=NS_SASL):
self.startsasl='not-supported'
self.DEBUG('SASL not supported by server','error')
return
mecs=[]
for mec in feats.getTag('mechanisms',namespace=NS_SASL).getTags('mechanism'):
mecs.append(mec.getData())
self._owner.RegisterHandler('challenge',self.SASLHandler,xmlns=NS_SASL)
self._owner.RegisterHandler('failure',self.SASLHandler,xmlns=NS_SASL)
self._owner.RegisterHandler('success',self.SASLHandler,xmlns=NS_SASL)
if "ANONYMOUS" in mecs and self.username == None:
node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'ANONYMOUS'})
elif "DIGEST-MD5" in mecs:
node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'DIGEST-MD5'})
elif "PLAIN" in mecs:
sasl_data='%s\x00%s\x00%s'%(self.username+'@'+self._owner.Server,self.username,self.password)
node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'PLAIN'},payload=[base64.encodestring(sasl_data).replace('\r','').replace('\n','')])
else:
self.startsasl='failure'
self.DEBUG('I can only use DIGEST-MD5 and PLAIN mechanisms.','error')
return
self.startsasl='in-process'
self._owner.send(node.__str__())
raise NodeProcessed
def SASLHandler(self,conn,challenge):
""" Perform next SASL auth step. Used internally. """
if challenge.getNamespace()<>NS_SASL: return
if challenge.getName()=='failure':
self.startsasl='failure'
try: reason=challenge.getChildren()[0]
except: reason=challenge
self.DEBUG('Failed SASL authentication: %s'%reason,'error')
raise NodeProcessed
elif challenge.getName()=='success':
self.startsasl='success'
self.DEBUG('Successfully authenticated with remote server.','ok')
handlers=self._owner.Dispatcher.dumpHandlers()
self._owner.Dispatcher.PlugOut()
dispatcher.Dispatcher().PlugIn(self._owner)
self._owner.Dispatcher.restoreHandlers(handlers)
self._owner.User=self.username
raise NodeProcessed
########################################
incoming_data=challenge.getData()
chal={}
data=base64.decodestring(incoming_data)
self.DEBUG('Got challenge:'+data,'ok')
for pair in re.findall('(\w+\s*=\s*(?:(?:"[^"]+")|(?:[^,]+)))',data):
key,value=[x.strip() for x in pair.split('=', 1)]
if value[:1]=='"' and value[-1:]=='"': value=value[1:-1]
chal[key]=value
if chal.has_key('qop') and 'auth' in [x.strip() for x in chal['qop'].split(',')]:
resp={}
resp['username']=self.username
resp['realm']=self._owner.Server
resp['nonce']=chal['nonce']
cnonce=''
for i in range(7):
cnonce+=hex(int(random.random()*65536*4096))[2:]
resp['cnonce']=cnonce
resp['nc']=('00000001')
resp['qop']='auth'
resp['digest-uri']='xmpp/'+self._owner.Server
A1=C([H(C([resp['username'],resp['realm'],self.password])),resp['nonce'],resp['cnonce']])
A2=C(['AUTHENTICATE',resp['digest-uri']])
response= HH(C([HH(A1),resp['nonce'],resp['nc'],resp['cnonce'],resp['qop'],HH(A2)]))
resp['response']=response
resp['charset']='utf-8'
sasl_data=''
for key in ['charset','username','realm','nonce','nc','cnonce','digest-uri','response','qop']:
if key in ['nc','qop','response','charset']: sasl_data+="%s=%s,"%(key,resp[key])
else: sasl_data+='%s="%s",'%(key,resp[key])
########################################
node=Node('response',attrs={'xmlns':NS_SASL},payload=[base64.encodestring(sasl_data[:-1]).replace('\r','').replace('\n','')])
self._owner.send(node.__str__())
elif chal.has_key('rspauth'): self._owner.send(Node('response',attrs={'xmlns':NS_SASL}).__str__())
else:
self.startsasl='failure'
self.DEBUG('Failed SASL authentication: unknown challenge','error')
raise NodeProcessed
class Bind(PlugIn):
""" Bind some JID to the current connection to allow router know of our location."""
def __init__(self):
PlugIn.__init__(self)
self.DBG_LINE='bind'
self.bound=None
def plugin(self,owner):
""" Start resource binding, if allowed at this time. Used internally. """
if self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else: self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def plugout(self):
""" Remove Bind handler from owner's dispatcher. Used internally. """
self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def FeaturesHandler(self,conn,feats):
""" Determine if server supports resource binding and set some internal attributes accordingly. """
if not feats.getTag('bind',namespace=NS_BIND):
self.bound='failure'
self.DEBUG('Server did not request binding.','error')
return
if feats.getTag('session',namespace=NS_SESSION): self.session=1
else: self.session=-1
self.bound=[]
def Bind(self,resource=None):
""" Perform binding. Use provided resource name or random (if not provided). """
while self.bound is None and self._owner.Process(1): pass
if resource: resource=[Node('resource',payload=[resource])]
else: resource=[]
resp=self._owner.SendAndWaitForResponse(Protocol('iq',typ='set',payload=[Node('bind',attrs={'xmlns':NS_BIND},payload=resource)]))
if isResultNode(resp):
self.bound.append(resp.getTag('bind').getTagData('jid'))
self.DEBUG('Successfully bound %s.'%self.bound[-1],'ok')
jid=JID(resp.getTag('bind').getTagData('jid'))
self._owner.User=jid.getNode()
self._owner.Resource=jid.getResource()
resp=self._owner.SendAndWaitForResponse(Protocol('iq',typ='set',payload=[Node('session',attrs={'xmlns':NS_SESSION})]))
if isResultNode(resp):
self.DEBUG('Successfully opened session.','ok')
self.session=1
return 'ok'
else:
self.DEBUG('Session open failed.','error')
self.session=0
elif resp: self.DEBUG('Binding failed: %s.'%resp.getTag('error'),'error')
else:
self.DEBUG('Binding failed: timeout expired.','error')
return ''
class ComponentBind(PlugIn):
""" ComponentBind some JID to the current connection to allow router know of our location."""
def __init__(self, sasl):
PlugIn.__init__(self)
self.DBG_LINE='bind'
self.bound=None
self.needsUnregister=None
self.sasl = sasl
def plugin(self,owner):
""" Start resource binding, if allowed at this time. Used internally. """
if not self.sasl:
self.bound=[]
return
if self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else:
self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
self.needsUnregister=1
def plugout(self):
""" Remove ComponentBind handler from owner's dispatcher. Used internally. """
if self.needsUnregister:
self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def FeaturesHandler(self,conn,feats):
""" Determine if server supports resource binding and set some internal attributes accordingly. """
if not feats.getTag('bind',namespace=NS_BIND):
self.bound='failure'
self.DEBUG('Server did not request binding.','error')
return
if feats.getTag('session',namespace=NS_SESSION): self.session=1
else: self.session=-1
self.bound=[]
def Bind(self,domain=None):
""" Perform binding. Use provided domain name (if not provided). """
while self.bound is None and self._owner.Process(1): pass
if self.sasl:
xmlns = NS_COMPONENT_1
else:
xmlns = None
self.bindresponse = None
ttl = dispatcher.DefaultTimeout
self._owner.RegisterHandler('bind',self.BindHandler,xmlns=xmlns)
self._owner.send(Protocol('bind',attrs={'name':domain},xmlns=NS_COMPONENT_1))
while self.bindresponse is None and self._owner.Process(1) and ttl > 0: ttl-=1
self._owner.UnregisterHandler('bind',self.BindHandler,xmlns=xmlns)
resp=self.bindresponse
if resp and resp.getAttr('error'):
self.DEBUG('Binding failed: %s.'%resp.getAttr('error'),'error')
elif resp:
self.DEBUG('Successfully bound.','ok')
return 'ok'
else:
self.DEBUG('Binding failed: timeout expired.','error')
return ''
def BindHandler(self,conn,bind):
self.bindresponse = bind
| gpl-3.0 |
zakuro9715/lettuce | tests/integration/lib/Django-1.2.5/django/core/mail/backends/console.py | 308 | 1295 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stdout)
self._lock = threading.RLock()
super(EmailBackend, self).__init__(*args, **kwargs)
def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
self._lock.acquire()
try:
# The try-except is nested to allow for
# Python 2.4 support (Refs #12147)
try:
stream_created = self.open()
for message in email_messages:
self.stream.write('%s\n' % message.message().as_string())
self.stream.write('-'*79)
self.stream.write('\n')
self.stream.flush() # flush after each message
if stream_created:
self.close()
except:
if not self.fail_silently:
raise
finally:
self._lock.release()
return len(email_messages)
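# Usage sketch: this backend is selected through standard Django settings
# (the dotted path below is this module's real location):
#   EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#   from django.core.mail import send_mail
#   send_mail('Subject', 'Body', 'from@example.com', ['to@example.com'])
# Each message is then printed to stdout instead of being delivered.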
| gpl-3.0 |
cnbeining/you-get | src/you_get/extractors/huaban.py | 7 | 2281 | #!/usr/bin/env python
import json
import os
import re
import math
import traceback
import urllib.parse as urlparse
from ..common import *
__all__ = ['huaban_download']
site_info = '花瓣 (Huaban)'
LIMIT = 100
class Board:
def __init__(self, title, pins):
self.title = title
self.pins = pins
self.pin_count = len(pins)
class Pin:
host = 'http://img.hb.aicdn.com/'
def __init__(self, pin_json):
img_file = pin_json['file']
self.id = str(pin_json['pin_id'])
self.url = urlparse.urljoin(self.host, img_file['key'])
self.ext = img_file['type'].split('/')[-1]
def construct_url(url, **params):
param_str = urlparse.urlencode(params)
return url + '?' + param_str
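# Worked example (illustrative board id):
#   construct_url('http://huaban.com/boards/123/', limit=100)
#   -> 'http://huaban.com/boards/123/?limit=100'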
def extract_json_data(url, **params):
url = construct_url(url, **params)
html = get_content(url, headers=fake_headers)
json_string = match1(html, r'app.page\["board"\] = (.*?});')
json_data = json.loads(json_string)
return json_data
def extract_board_data(url):
json_data = extract_json_data(url, limit=LIMIT)
pin_list = json_data['pins']
title = json_data['title']
pin_count = json_data['pin_count']
pin_count -= len(pin_list)
while pin_count > 0:
json_data = extract_json_data(url, max=pin_list[-1]['pin_id'],
limit=LIMIT)
pins = json_data['pins']
pin_list += pins
pin_count -= len(pins)
return Board(title, list(map(Pin, pin_list)))
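# Pagination sketch (illustrative numbers): for a board with pin_count == 250
# and LIMIT == 100, the loop above fetches 100 pins up front, then issues two
# more requests keyed on the last seen pin_id (max=...), decrementing the
# remaining count 150 -> 50 -> 0 before returning the Board.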
def huaban_download_board(url, output_dir, **kwargs):
kwargs['merge'] = False
board = extract_board_data(url)
output_dir = os.path.join(output_dir, board.title)
print_info(site_info, board.title, 'jpg', float('Inf'))
for pin in board.pins:
download_urls([pin.url], pin.id, pin.ext, float('Inf'),
output_dir=output_dir, faker=True, **kwargs)
def huaban_download(url, output_dir='.', **kwargs):
if re.match(r'http://huaban\.com/boards/\d+/', url):
huaban_download_board(url, output_dir, **kwargs)
else:
print('Only board (画板) pages are supported currently')
print('ex: http://huaban.com/boards/12345678/')
download = huaban_download
download_playlist = playlist_not_supported("huaban")
| mit |
ME-ICA/me-ica | meica.libs/mdp/graph/graph.py | 1 | 13012 | # inspired by some code by Nathan Denny (1999)
# see http://www.ece.arizona.edu/~denny/python_nest/graph_lib_1.0.1.html
try:
# use reduce against BDFL's will even on python > 2.6
from functools import reduce
except ImportError:
pass
class GraphException(Exception):
"""Base class for exception in the graph package."""
pass
class GraphTopologicalException(GraphException):
"""Exception thrown during a topological sort if the graph is cyclical."""
pass
def is_sequence(x):
return isinstance(x, (list, tuple))
def recursive_map(func, seq):
"""Apply a function recursively on a sequence and all subsequences."""
def _func(x):
if is_sequence(x):
return recursive_map(func, x)
else:
return func(x)
return map(_func, seq)
def recursive_reduce(func, seq, *argv):
"""Apply reduce(func, seq) recursively to a sequence and all its
subsequences."""
def _func(x, y):
if is_sequence(y):
return func(x, recursive_reduce(func, y))
else:
return func(x, y)
return reduce(_func, seq, *argv)
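# Worked examples (Python 2 semantics, where map() returns a list):
#   recursive_map(lambda x: x * 2, [1, (2, 3)])          -> [2, [4, 6]]
#   recursive_reduce(lambda x, y: x + y, [1, [2, 3]], 0) -> 6
# Note that inner tuples come back as lists, since each recursion level
# returns the result of map().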
class GraphNode(object):
"""Represent a graph node and all information attached to it."""
def __init__(self, data=None):
self.data = data
# edges in
self.ein = []
# edges out
self.eout = []
def add_edge_in(self, edge):
self.ein.append(edge)
def add_edge_out(self, edge):
self.eout.append(edge)
def remove_edge_in(self, edge):
self.ein.remove(edge)
def remove_edge_out(self, edge):
self.eout.remove(edge)
def get_edges_in(self, from_ = None):
"""Return a copy of the list of the entering edges. If from_
is specified, return only the nodes coming from that node."""
inedges = self.ein[:]
if from_:
inedges = [edge for edge in inedges if edge.head == from_]
return inedges
def get_edges_out(self, to_ = None):
"""Return a copy of the list of the outgoing edges. If to_
is specified, return only the nodes going to that node."""
outedges = self.eout[:]
if to_:
outedges = [edge for edge in outedges if edge.tail == to_]
return outedges
def get_edges(self, neighbor = None):
"""Return a copy of all edges. If neighbor is specified, return
only the edges connected to that node."""
return ( self.get_edges_in(from_=neighbor) +
self.get_edges_out(to_=neighbor) )
def in_degree(self):
"""Return the number of entering edges."""
return len(self.ein)
def out_degree(self):
"""Return the number of outgoing edges."""
return len(self.eout)
def degree(self):
"""Return the number of edges."""
return self.in_degree()+self.out_degree()
def in_neighbors(self):
"""Return the neighbors down in-edges (i.e. the parents nodes)."""
return map(lambda x: x.get_head(), self.ein)
def out_neighbors(self):
"""Return the neighbors down in-edges (i.e. the parents nodes)."""
return map(lambda x: x.get_tail(), self.eout)
def neighbors(self):
return self.in_neighbors() + self.out_neighbors()
class GraphEdge(object):
"""Represent a graph edge and all information attached to it."""
def __init__(self, head, tail, data=None):
# head node
self.head = head
        # tail node
self.tail = tail
# arbitrary data slot
self.data = data
def get_ends(self):
"""Return the tuple (head_id, tail_id)."""
return (self.head, self.tail)
def get_tail(self):
return self.tail
def get_head(self):
return self.head
class Graph(object):
"""Represent a directed graph."""
def __init__(self):
# list of nodes
self.nodes = []
# list of edges
self.edges = []
# node functions
def add_node(self, data=None):
node = GraphNode(data=data)
self.nodes.append(node)
return node
def remove_node(self, node):
# the node is not in this graph
if node not in self.nodes:
errstr = 'This node is not part of the graph (%s)' % node
raise GraphException(errstr)
# remove all edges containing this node
for edge in node.get_edges():
self.remove_edge(edge)
# remove the node
self.nodes.remove(node)
# edge functions
def add_edge(self, head, tail, data=None):
"""Add an edge going from head to tail.
head : head node
tail : tail node
"""
# create edge
edge = GraphEdge(head, tail, data=data)
# add edge to head and tail node
head.add_edge_out(edge)
tail.add_edge_in(edge)
# add to the edges dictionary
self.edges.append(edge)
return edge
def remove_edge(self, edge):
head, tail = edge.get_ends()
# remove from head
head.remove_edge_out(edge)
# remove from tail
tail.remove_edge_in(edge)
# remove the edge
self.edges.remove(edge)
### populate functions
def add_nodes(self, data):
"""Add many nodes at once.
data -- number of nodes to add or sequence of data values, one for
each new node"""
if not is_sequence(data):
data = [None]*data
return map(self.add_node, data)
def add_tree(self, tree):
"""Add a tree to the graph.
The tree is specified with a nested list of tuple, in a LISP-like
notation. The values specified in the list become the values of
the single nodes.
Return an equivalent nested list with the nodes instead of the values.
Example:
>>> a=b=c=d=e=None
>>> g.add_tree( (a, b, (c, d ,e)) )
corresponds to this tree structure, with all node values set to None:
                        a
                       / \
                      b   c
                         / \
                        d   e
"""
def _add_edge(root, son):
self.add_edge(root, son)
return root
nodes = recursive_map(self.add_node, tree)
recursive_reduce(_add_edge, nodes)
return nodes
def add_full_connectivity(self, from_nodes, to_nodes):
"""Add full connectivity from a group of nodes to another one.
Return a list of lists of edges, one for each node in 'from_nodes'.
Example: create a two-layer graph with full connectivity.
>>> g = Graph()
>>> layer1 = g.add_nodes(10)
>>> layer2 = g.add_nodes(5)
>>> g.add_full_connectivity(layer1, layer2)
"""
edges = []
for from_ in from_nodes:
edges.append(map(lambda x: self.add_edge(from_, x), to_nodes))
return edges
###### graph algorithms
def topological_sort(self):
"""Perform a topological sort of the nodes. If the graph has a cycle,
throw a GraphTopologicalException with the list of successfully
ordered nodes."""
# topologically sorted list of the nodes (result)
topological_list = []
# queue (fifo list) of the nodes with in_degree 0
topological_queue = []
# {node: in_degree} for the remaining nodes (those with in_degree>0)
remaining_indegree = {}
# init queues and lists
for node in self.nodes:
indegree = node.in_degree()
if indegree == 0:
topological_queue.append(node)
else:
remaining_indegree[node] = indegree
# remove nodes with in_degree 0 and decrease the in_degree of their sons
while len(topological_queue):
# remove the first node with degree 0
node = topological_queue.pop(0)
topological_list.append(node)
# decrease the in_degree of the sons
for son in node.out_neighbors():
remaining_indegree[son] -= 1
if remaining_indegree[son] == 0:
topological_queue.append(son)
# if not all nodes were covered, the graph must have a cycle
        # raise a GraphTopologicalException
if len(topological_list)!=len(self.nodes):
raise GraphTopologicalException(topological_list)
return topological_list
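    # Worked sketch (hypothetical 3-node chain):
    #   g = Graph(); a, b, c = g.add_nodes(3)
    #   g.add_edge(a, b); g.add_edge(b, c)
    #   g.topological_sort() == [a, b, c]
    # Adding g.add_edge(c, a) would close a cycle and make the call raise
    # GraphTopologicalException instead.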
### Depth-First sort
def _dfs(self, neighbors_fct, root, visit_fct=None):
# core depth-first sort function
# changing the neighbors function to return the sons of a node,
# its parents, or both one gets normal dfs, reverse dfs, or
# dfs on the equivalent undirected graph, respectively
# result list containing the nodes in Depth-First order
dfs_list = []
# keep track of all already visited nodes
visited_nodes = { root: None }
# stack (lifo) list
dfs_stack = []
dfs_stack.append(root)
while len(dfs_stack):
# consider the next node on the stack
node = dfs_stack.pop()
dfs_list.append(node)
# visit the node
if visit_fct != None:
visit_fct(node)
# add all sons to the stack (if not already visited)
for son in neighbors_fct(node):
if son not in visited_nodes:
visited_nodes[son] = None
dfs_stack.append(son)
return dfs_list
def dfs(self, root, visit_fct=None):
"""Return a list of nodes in some Depth First order starting from
a root node. If defined, visit_fct is applied on each visited node.
The returned list does not have to contain all nodes in the
graph, but only the ones reachable from the root.
"""
neighbors_fct = lambda node: node.out_neighbors()
return self._dfs(neighbors_fct, root, visit_fct=visit_fct)
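    # Example sketch: on the chain a -> b -> c from the sketch above,
    # g.dfs(a) returns [a, b, c], while g.dfs(c) returns just [c] --
    # only nodes reachable from the root are visited.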
def undirected_dfs(self, root, visit_fct=None):
"""Perform Depth First sort.
This function is identical to dfs, but the sort is performed on
the equivalent undirected version of the graph."""
neighbors_fct = lambda node: node.neighbors()
return self._dfs(neighbors_fct, root, visit_fct=visit_fct)
### Connected components
def connected_components(self):
"""Return a list of lists containing the nodes of all connected
components of the graph."""
visited = {}
def visit_fct(node, visited=visited):
visited[node] = None
components = []
nodes = self.nodes
for node in nodes:
if node in visited:
continue
components.append(self.undirected_dfs(node, visit_fct))
return components
def is_weakly_connected(self):
"""Return True if the graph is weakly connected."""
return len(self.undirected_dfs(self.nodes[0]))==len(self.nodes)
### Breadth-First Sort
# BFS and DFS could be generalized to one function. I leave them
# distinct for clarity.
def _bfs(self, neighbors_fct, root, visit_fct=None):
# core breadth-first sort function
# changing the neighbors function to return the sons of a node,
# its parents, or both one gets normal bfs, reverse bfs, or
# bfs on the equivalent undirected graph, respectively
# result list containing the nodes in Breadth-First order
bfs_list = []
# keep track of all already visited nodes
visited_nodes = { root: None }
# queue (fifo) list
bfs_queue = []
bfs_queue.append(root)
while len(bfs_queue):
# consider the next node in the queue
node = bfs_queue.pop(0)
bfs_list.append(node)
# visit the node
if visit_fct != None:
visit_fct(node)
# add all sons to the queue (if not already visited)
for son in neighbors_fct(node):
if son not in visited_nodes:
visited_nodes[son] = None
bfs_queue.append(son)
return bfs_list
def bfs(self, root, visit_fct=None):
"""Return a list of nodes in some Breadth First order starting from
a root node. If defined, visit_fct is applied on each visited node.
Note the returned list does not have to contain all nodes in the
graph, but only the ones reachable from the root."""
neighbors_fct = lambda node: node.out_neighbors()
return self._bfs(neighbors_fct, root, visit_fct=visit_fct)
def undirected_bfs(self, root, visit_fct=None):
"""Perform Breadth First sort.
This function is identical to bfs, but the sort is performed on
the equivalent undirected version of the graph."""
neighbors_fct = lambda node: node.neighbors()
return self._bfs(neighbors_fct, root, visit_fct=visit_fct)
| lgpl-2.1 |
joequery/django | tests/utils_tests/test_html.py | 160 | 10711 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from datetime import datetime
from django.test import SimpleTestCase, ignore_warnings
from django.utils import html, safestring, six
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
class TestUtilsHtml(SimpleTestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
('&', '&'),
('<', '<'),
('>', '>'),
('"', '"'),
("'", '''),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
self.check_output(f, '<&', '<&')
def test_format_html(self):
self.assertEqual(
html.format_html("{} {} {third} {fourth}",
"< Dangerous >",
html.mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=html.mark_safe("<i>safe again</i>")
),
"< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
)
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
            ('<p>See: &#39;&eacute; is an apostrophe followed by e acute</p>',
             'See: &#39;&eacute; is an apostrophe followed by e acute'),
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('hi, <f x', 'hi, <f x'),
('234<235, right?', '234<235, right?'),
('a4<a5 right?', 'a4<a5 right?'),
('b7>b2!', 'b7>b2!'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
('a<p a >b</p>c', 'abc'),
('d<a:b c:d>e</p>f', 'def'),
('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
# caused infinite loop on Pythons not patched with
# http://bugs.python.org/issue20288
('&gotcha&#;<>', '&gotcha&#;<>'),
)
for value, output in items:
self.check_output(f, value, output)
# Some convoluted syntax for which parsing may differ between python versions
output = html.strip_tags('<sc<!-- -->ript>test<<!-- -->/script>')
self.assertNotIn('<script>', output)
self.assertIn('test', output)
output = html.strip_tags('<script>alert()</script>&h')
self.assertNotIn('<script>', output)
self.assertIn('alert()', output)
# Test with more lengthy content (also catching performance regressions)
for filename in ('strip_tags1.html', 'strip_tags2.txt'):
path = os.path.join(os.path.dirname(upath(__file__)), 'files', filename)
with open(path, 'r') as fp:
content = force_text(fp.read())
start = datetime.now()
stripped = html.strip_tags(content)
elapsed = datetime.now() - start
self.assertEqual(elapsed.seconds, 0)
self.assertIn("Please try again.", stripped)
self.assertNotIn('<', stripped)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
@ignore_warnings(category=RemovedInDjango110Warning)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_escapejs(self):
f = html.escapejs
items = (
('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(r'\ : backslashes, too', '\\u005C : backslashes, too'),
('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
@ignore_warnings(category=RemovedInDjango110Warning)
def test_remove_tags(self):
f = html.remove_tags
items = (
("<b><i>Yes</i></b>", "b i", "Yes"),
("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"),
)
for value, tags, output in items:
self.assertEqual(f(value, tags), output)
def test_smart_urlquote(self):
quote = html.smart_urlquote
# Ensure that IDNs are properly quoted
self.assertEqual(quote('http://öäü.com/'), 'http://xn--4ca9at.com/')
self.assertEqual(quote('http://öäü.com/öäü/'), 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/')
# Ensure that everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered safe as per RFC
self.assertEqual(quote('http://example.com/path/öäü/'), 'http://example.com/path/%C3%B6%C3%A4%C3%BC/')
self.assertEqual(quote('http://example.com/%C3%B6/ä/'), 'http://example.com/%C3%B6/%C3%A4/')
self.assertEqual(quote('http://example.com/?x=1&y=2+3&z='), 'http://example.com/?x=1&y=2+3&z=')
self.assertEqual(quote('http://example.com/?x=<>"\''), 'http://example.com/?x=%3C%3E%22%27')
self.assertEqual(quote('http://example.com/?q=http://example.com/?x=1%26q=django'),
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
self.assertEqual(quote('http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
def test_conditional_escape(self):
s = '<h1>interop</h1>'
self.assertEqual(html.conditional_escape(s),
'<h1>interop</h1>')
self.assertEqual(html.conditional_escape(safestring.mark_safe(s)), s)
def test_html_safe(self):
@html.html_safe
class HtmlClass(object):
if six.PY2:
def __unicode__(self):
return "<h1>I'm a html class!</h1>"
else:
def __str__(self):
return "<h1>I'm a html class!</h1>"
html_obj = HtmlClass()
self.assertTrue(hasattr(HtmlClass, '__html__'))
self.assertTrue(hasattr(html_obj, '__html__'))
self.assertEqual(force_text(html_obj), html_obj.__html__())
def test_html_safe_subclass(self):
if six.PY2:
class BaseClass(object):
def __html__(self):
# defines __html__ on its own
return 'some html content'
def __unicode__(self):
return 'some non html content'
@html.html_safe
class Subclass(BaseClass):
def __unicode__(self):
# overrides __unicode__ and is marked as html_safe
return 'some html safe content'
else:
class BaseClass(object):
def __html__(self):
# defines __html__ on its own
return 'some html content'
def __str__(self):
return 'some non html content'
@html.html_safe
class Subclass(BaseClass):
def __str__(self):
# overrides __str__ and is marked as html_safe
return 'some html safe content'
subclass_obj = Subclass()
self.assertEqual(force_text(subclass_obj), subclass_obj.__html__())
def test_html_safe_defines_html_error(self):
msg = "can't apply @html_safe to HtmlClass because it defines __html__()."
with self.assertRaisesMessage(ValueError, msg):
@html.html_safe
class HtmlClass(object):
def __html__(self):
return "<h1>I'm a html class!</h1>"
def test_html_safe_doesnt_define_str(self):
method_name = '__unicode__()' if six.PY2 else '__str__()'
msg = "can't apply @html_safe to HtmlClass because it doesn't define %s." % method_name
with self.assertRaisesMessage(ValueError, msg):
@html.html_safe
class HtmlClass(object):
pass
| bsd-3-clause |
jjhuff/fcc-comments | lib/nltk/sem/evaluate.py | 5 | 22893 | # Natural Language Toolkit: Models for first-order languages with lambda
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Ewan Klein <[email protected]>,
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
#TODO:
#- fix tracing
#- fix iterator-based approach to existentials
"""
This module provides data structures for representing first-order
models.
"""
from __future__ import print_function
from pprint import pformat
import inspect
import textwrap
from nltk.decorators import decorator
from nltk.sem.logic import (AbstractVariableExpression, AllExpression,
AndExpression, ApplicationExpression, EqualityExpression,
ExistsExpression, IffExpression, ImpExpression,
IndividualVariableExpression, LambdaExpression,
LogicParser, NegatedExpression, OrExpression,
Variable, is_indvar)
class Error(Exception): pass
class Undefined(Error): pass
def trace(f, *args, **kw):
argspec = inspect.getargspec(f)
d = dict(zip(argspec[0], args))
if d.pop('trace', None):
print()
for item in d.items():
print("%s => %s" % item)
return f(*args, **kw)
def is_rel(s):
"""
Check whether a set represents a relation (of any arity).
:param s: a set containing tuples of str elements
:type s: set
:rtype: bool
"""
# we have the empty relation, i.e. set()
if len(s) == 0:
return True
# all the elements are tuples of the same length
elif s == set([elem for elem in s if isinstance(elem, tuple)]) and\
len(max(s))==len(min(s)):
return True
else:
raise ValueError("Set %r contains sequences of different lengths" % s)
def set2rel(s):
"""
Convert a set containing individuals (strings or numbers) into a set of
unary tuples. Any tuples of strings already in the set are passed through
unchanged.
For example:
- set(['a', 'b']) => set([('a',), ('b',)])
- set([3, 27]) => set([('3',), ('27',)])
:type s: set
:rtype: set of tuple of str
"""
new = set()
for elem in s:
if isinstance(elem, str):
new.add((elem,))
elif isinstance(elem, int):
            new.add((str(elem),))
else:
new.add(elem)
return new
def arity(rel):
"""
Check the arity of a relation.
:type rel: set of tuples
    :rtype: int
"""
if len(rel) == 0:
return 0
return len(list(rel)[0])
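# Worked example, combining the helpers above:
#   set2rel(set(['a', 'b'])) == set([('a',), ('b',)])
#   arity(set2rel(set(['a', 'b']))) == 1
#   arity(set([('b1', 'g1'), ('b2', 'g2')])) == 2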
class Valuation(dict):
"""
A dictionary which represents a model-theoretic Valuation of non-logical constants.
Keys are strings representing the constants to be interpreted, and values correspond
to individuals (represented as strings) and n-ary relations (represented as sets of tuples
of strings).
An instance of ``Valuation`` will raise a KeyError exception (i.e.,
just behave like a standard dictionary) if indexed with an expression that
is not in its list of symbols.
"""
def __init__(self, iter):
"""
:param iter: a list of (symbol, value) pairs.
"""
dict.__init__(self)
for (sym, val) in iter:
if isinstance(val, str) or isinstance(val, bool):
self[sym] = val
elif isinstance(val, set):
self[sym] = set2rel(val)
else:
msg = textwrap.fill("Error in initializing Valuation. "
"Unrecognized value for symbol '%s':\n%s" % (sym, val), width=66)
raise ValueError(msg)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)
else:
raise Undefined("Unknown expression: '%s'" % key)
def __str__(self):
return pformat(self)
@property
def domain(self):
"""Set-theoretic domain of the value-space of a Valuation."""
dom = []
for val in self.values():
if isinstance(val, str):
dom.append(val)
elif not isinstance(val, bool):
dom.extend([elem for tuple in val for elem in tuple if elem is not None])
return set(dom)
@property
def symbols(self):
"""The non-logical constants which the Valuation recognizes."""
return sorted(self.keys())
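# Usage sketch (the pairs mirror the folmodel() demo further below):
#   val = Valuation([('adam', 'b1'), ('girl', set(['g1', 'g2']))])
#   val['adam']  -> 'b1'
#   val['girl']  -> set([('g1',), ('g2',)])   # unary tuples via set2rel
#   val.domain   -> set(['b1', 'g1', 'g2'])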
class Assignment(dict):
"""
A dictionary which represents an assignment of values to variables.
    An assignment can only assign values from its domain.
If an unknown expression *a* is passed to a model *M*\ 's
interpretation function *i*, *i* will first check whether *M*\ 's
valuation assigns an interpretation to *a* as a constant, and if
this fails, *i* will delegate the interpretation of *a* to
*g*. *g* only assigns values to individual variables (i.e.,
members of the class ``IndividualVariableExpression`` in the ``logic``
module. If a variable is not assigned a value by *g*, it will raise
an ``Undefined`` exception.
A variable *Assignment* is a mapping from individual variables to
entities in the domain. Individual variables are usually indicated
with the letters ``'x'``, ``'y'``, ``'w'`` and ``'z'``, optionally
followed by an integer (e.g., ``'x0'``, ``'y332'``). Assignments are
created using the ``Assignment`` constructor, which also takes the
domain as a parameter.
>>> from nltk.sem.evaluate import Assignment
>>> dom = set(['u1', 'u2', 'u3', 'u4'])
>>> g3 = Assignment(dom, [('x', 'u1'), ('y', 'u2')])
>>> g3
{'y': 'u2', 'x': 'u1'}
There is also a ``print`` format for assignments which uses a notation
closer to that in logic textbooks:
    >>> print(g3)
g[u2/y][u1/x]
It is also possible to update an assignment using the ``add`` method:
>>> dom = set(['u1', 'u2', 'u3', 'u4'])
>>> g4 = Assignment(dom)
>>> g4.add('x', 'u1')
{'x': 'u1'}
With no arguments, ``purge()`` is equivalent to ``clear()`` on a dictionary:
>>> g4.purge()
>>> g4
{}
:param domain: the domain of discourse
:type domain: set
:param assign: a list of (varname, value) associations
:type assign: list
"""
def __init__(self, domain, assign=None):
dict.__init__(self)
self.domain = domain
if assign:
for (var, val) in assign:
assert val in self.domain,\
"'%s' is not in the domain: %s" % (val, self.domain)
assert is_indvar(var),\
"Wrong format for an Individual Variable: '%s'" % var
self[var] = val
self._addvariant()
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)
else:
raise Undefined("Not recognized as a variable: '%s'" % key)
def copy(self):
new = Assignment(self.domain)
new.update(self)
return new
def purge(self, var=None):
"""
Remove one or all keys (i.e. logic variables) from an
assignment, and update ``self.variant``.
:param var: a Variable acting as a key for the assignment.
"""
if var:
val = self[var]
del self[var]
else:
self.clear()
self._addvariant()
return None
def __str__(self):
"""
        Pretty printing for assignments. {'x': 'u'} appears as 'g[u/x]'
"""
gstring = "g"
for (val, var) in self.variant:
gstring += "[%s/%s]" % (val, var)
return gstring
def _addvariant(self):
"""
Create a more pretty-printable version of the assignment.
"""
list = []
for item in self.items():
pair = (item[1], item[0])
list.append(pair)
self.variant = list
return None
def add(self, var, val):
"""
Add a new variable-value pair to the assignment, and update
``self.variant``.
"""
assert val in self.domain,\
"%s is not in the domain %s" % (val, self.domain)
assert is_indvar(var),\
"Wrong format for an Individual Variable: '%s'" % var
self[var] = val
self._addvariant()
return self
class Model(object):
"""
A first order model is a domain *D* of discourse and a valuation *V*.
A domain *D* is a set, and a valuation *V* is a map that associates
expressions with values in the model.
The domain of *V* should be a subset of *D*.
Construct a new ``Model``.
:type domain: set
:param domain: A set of entities representing the domain of discourse of the model.
:type valuation: Valuation
:param valuation: the valuation of the model.
"""
def __init__(self, domain, valuation):
assert isinstance(domain, set)
self.domain = domain
self.valuation = valuation
if not domain.issuperset(valuation.domain):
raise Error("The valuation domain, %s, must be a subset of the model's domain, %s"\
% (valuation.domain, domain))
def __repr__(self):
return "(%r, %r)" % (self.domain, self.valuation)
def __str__(self):
return "Domain = %s,\nValuation = \n%s" % (self.domain, self.valuation)
def evaluate(self, expr, g, trace=None):
"""
Call the ``LogicParser`` to parse input expressions, and
provide a handler for ``satisfy``
that blocks further propagation of the ``Undefined`` error.
:param expr: An ``Expression`` of ``logic``.
:type g: Assignment
:param g: an assignment to individual variables.
:rtype: bool or 'Undefined'
"""
try:
lp = LogicParser()
parsed = lp.parse(expr)
value = self.satisfy(parsed, g, trace=trace)
if trace:
print()
print("'%s' evaluates to %s under M, %s" % (expr, value, g))
return value
except Undefined:
if trace:
print()
print("'%s' is undefined under M, %s" % (expr, g))
return 'Undefined'
def satisfy(self, parsed, g, trace=None):
"""
Recursive interpretation function for a formula of first-order logic.
Raises an ``Undefined`` error when ``parsed`` is an atomic string
but is not a symbol or an individual variable.
:return: Returns a truth value or ``Undefined`` if ``parsed`` is\
complex, and calls the interpretation function ``i`` if ``parsed``\
is atomic.
:param parsed: An expression of ``logic``.
:type g: Assignment
:param g: an assignment to individual variables.
"""
if isinstance(parsed, ApplicationExpression):
function, arguments = parsed.uncurry()
if isinstance(function, AbstractVariableExpression):
#It's a predicate expression ("P(x,y)"), so used uncurried arguments
funval = self.satisfy(function, g)
argvals = tuple([self.satisfy(arg, g) for arg in arguments])
return argvals in funval
else:
#It must be a lambda expression, so use curried form
funval = self.satisfy(parsed.function, g)
argval = self.satisfy(parsed.argument, g)
return funval[argval]
elif isinstance(parsed, NegatedExpression):
return not self.satisfy(parsed.term, g)
elif isinstance(parsed, AndExpression):
return self.satisfy(parsed.first, g) and \
self.satisfy(parsed.second, g)
elif isinstance(parsed, OrExpression):
return self.satisfy(parsed.first, g) or \
self.satisfy(parsed.second, g)
elif isinstance(parsed, ImpExpression):
return (not self.satisfy(parsed.first, g)) or \
self.satisfy(parsed.second, g)
elif isinstance(parsed, IffExpression):
return self.satisfy(parsed.first, g) == \
self.satisfy(parsed.second, g)
elif isinstance(parsed, EqualityExpression):
return self.satisfy(parsed.first, g) == \
self.satisfy(parsed.second, g)
elif isinstance(parsed, AllExpression):
new_g = g.copy()
for u in self.domain:
new_g.add(parsed.variable.name, u)
if not self.satisfy(parsed.term, new_g):
return False
return True
elif isinstance(parsed, ExistsExpression):
new_g = g.copy()
for u in self.domain:
new_g.add(parsed.variable.name, u)
if self.satisfy(parsed.term, new_g):
return True
return False
elif isinstance(parsed, LambdaExpression):
cf = {}
var = parsed.variable.name
for u in self.domain:
val = self.satisfy(parsed.term, g.add(var, u))
# NB the dict would be a lot smaller if we do this:
# if val: cf[u] = val
# But then need to deal with cases where f(a) should yield
# a function rather than just False.
cf[u] = val
return cf
else:
return self.i(parsed, g, trace)
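    # Recursion sketch (with the valuation built by folmodel() below):
    # satisfying 'all x. (boy(x) | girl(x))' walks AllExpression ->
    # OrExpression -> ApplicationExpression for every entity in the domain,
    # and fails on 'd1' (a dog), so the formula evaluates to False.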
#@decorator(trace_eval)
def i(self, parsed, g, trace=False):
"""
An interpretation function.
Assuming that ``parsed`` is atomic:
- if ``parsed`` is a non-logical constant, calls the valuation *V*
- else if ``parsed`` is an individual variable, calls assignment *g*
- else returns ``Undefined``.
:param parsed: an ``Expression`` of ``logic``.
:type g: Assignment
:param g: an assignment to individual variables.
:return: a semantic value
"""
# If parsed is a propositional letter 'p', 'q', etc, it could be in valuation.symbols
# and also be an IndividualVariableExpression. We want to catch this first case.
# So there is a procedural consequence to the ordering of clauses here:
if parsed.variable.name in self.valuation.symbols:
return self.valuation[parsed.variable.name]
elif isinstance(parsed, IndividualVariableExpression):
return g[parsed.variable.name]
else:
raise Undefined("Can't find a value for %s" % parsed)
def satisfiers(self, parsed, varex, g, trace=None, nesting=0):
"""
Generate the entities from the model's domain that satisfy an open formula.
:param parsed: an open formula
:type parsed: Expression
:param varex: the relevant free individual variable in ``parsed``.
:type varex: VariableExpression or str
:param g: a variable assignment
:type g: Assignment
:return: a set of the entities that satisfy ``parsed``.
"""
spacer = ' '
indent = spacer + (spacer * nesting)
candidates = []
if isinstance(varex, str):
var = Variable(varex)
else:
var = varex
if var in parsed.free():
if trace:
print()
print((spacer * nesting) + "Open formula is '%s' with assignment %s" % (parsed, g))
for u in self.domain:
new_g = g.copy()
new_g.add(var.name, u)
if trace > 1:
lowtrace = trace-1
else:
lowtrace = 0
value = self.satisfy(parsed, new_g, lowtrace)
if trace:
print(indent + "(trying assignment %s)" % new_g)
# parsed == False under g[u/var]?
if value == False:
if trace:
print(indent + "value of '%s' under %s is False" % (parsed, new_g))
# so g[u/var] is a satisfying assignment
else:
candidates.append(u)
if trace:
print(indent + "value of '%s' under %s is %s" % (parsed, new_g, value))
result = set(c for c in candidates)
# var isn't free in parsed
else:
raise Undefined("%s is not free in %s" % (var.name, parsed))
return result
#//////////////////////////////////////////////////////////////////////
# Demos
#//////////////////////////////////////////////////////////////////////
# number of spacer chars
mult = 30
# Demo 1: Propositional Logic
#################
def propdemo(trace=None):
"""Example of a propositional model."""
global val1, dom1, m1, g1
val1 = Valuation([('P', True), ('Q', True), ('R', False)])
dom1 = set([])
m1 = Model(dom1, val1)
g1 = Assignment(dom1)
print()
print('*' * mult)
print("Propositional Formulas Demo")
print('*' * mult)
print('(Propositional constants treated as nullary predicates)')
print()
print("Model m1:\n", m1)
print('*' * mult)
sentences = [
'(P & Q)',
'(P & R)',
'- P',
'- R',
'- - P',
'- (P & R)',
'(P | R)',
'(R | P)',
'(R | R)',
'(- P | R)',
'(P | - P)',
'(P -> Q)',
'(P -> R)',
'(R -> P)',
'(P <-> P)',
'(R <-> R)',
'(P <-> R)',
]
for sent in sentences:
if trace:
print()
m1.evaluate(sent, g1, trace)
else:
print("The value of '%s' is: %s" % (sent, m1.evaluate(sent, g1)))
# Demo 2: FOL Model
#############
def folmodel(quiet=False, trace=None):
"""Example of a first-order model."""
global val2, v2, dom2, m2, g2
v2 = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),\
('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), ('dog', set(['d1'])),
('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
val2 = Valuation(v2)
dom2 = val2.domain
m2 = Model(dom2, val2)
g2 = Assignment(dom2, [('x', 'b1'), ('y', 'g2')])
if not quiet:
print()
print('*' * mult)
print("Models Demo")
print("*" * mult)
print("Model m2:\n", "-" * 14,"\n", m2)
print("Variable assignment = ", g2)
exprs = ['adam', 'boy', 'love', 'walks', 'x', 'y', 'z']
lp = LogicParser()
parsed_exprs = [lp.parse(e) for e in exprs]
print()
for parsed in parsed_exprs:
try:
print("The interpretation of '%s' in m2 is %s" % (parsed, m2.i(parsed, g2)))
except Undefined:
print("The interpretation of '%s' in m2 is Undefined" % parsed)
    applications = [('boy', ('adam',)), ('walks', ('adam',)), ('love', ('adam', 'y')), ('love', ('y', 'adam'))]
for (fun, args) in applications:
try:
funval = m2.i(lp.parse(fun), g2)
argsval = tuple(m2.i(lp.parse(arg), g2) for arg in args)
print("%s(%s) evaluates to %s" % (fun, args, argsval in funval))
except Undefined:
print("%s(%s) evaluates to Undefined" % (fun, args))
# Demo 3: FOL
#########
def foldemo(trace=None):
"""
Interpretation of closed expressions in a first-order model.
"""
folmodel(quiet=True)
print()
print('*' * mult)
print("FOL Formulas Demo")
print('*' * mult)
formulas = [
'love (adam, betty)',
'(adam = mia)',
'\\x. (boy(x) | girl(x))',
'\\x. boy(x)(adam)',
'\\x y. love(x, y)',
'\\x y. love(x, y)(adam)(betty)',
'\\x y. love(x, y)(adam, betty)',
'\\x y. (boy(x) & love(x, y))',
'\\x. exists y. (boy(x) & love(x, y))',
'exists z1. boy(z1)',
'exists x. (boy(x) & -(x = adam))',
'exists x. (boy(x) & all y. love(y, x))',
'all x. (boy(x) | girl(x))',
    'all x. (girl(x) -> exists y. boy(y) & love(x, y))', # Every girl loves some boy.
    'exists x. (boy(x) & all y. (girl(y) -> love(y, x)))', # There is some boy that every girl loves.
    'exists x. (boy(x) & all y. (girl(y) -> love(x, y)))', # Some boy loves every girl.
'all x. (dog(x) -> - girl(x))',
'exists x. exists y. (love(x, y) & love(x, y))'
]
for fmla in formulas:
g2.purge()
if trace:
m2.evaluate(fmla, g2, trace)
else:
print("The value of '%s' is: %s" % (fmla, m2.evaluate(fmla, g2)))
# Demo 4: Satisfaction
#############
def satdemo(trace=None):
"""Satisfiers of an open formula in a first order model."""
print()
print('*' * mult)
print("Satisfiers Demo")
print('*' * mult)
folmodel(quiet=True)
formulas = [
'boy(x)',
'(x = x)',
'(boy(x) | girl(x))',
'(boy(x) & girl(x))',
'love(adam, x)',
'love(x, adam)',
'-(x = adam)',
'exists z22. love(x, z22)',
'exists y. love(y, x)',
'all y. (girl(y) -> love(x, y))',
'all y. (girl(y) -> love(y, x))',
'all y. (girl(y) -> (boy(x) & love(y, x)))',
'(boy(x) & all y. (girl(y) -> love(x, y)))',
'(boy(x) & all y. (girl(y) -> love(y, x)))',
'(boy(x) & exists y. (girl(y) & love(y, x)))',
'(girl(x) -> dog(x))',
'all y. (dog(y) -> (x = y))',
'exists y. love(y, x)',
'exists y. (love(adam, y) & love(y, x))'
]
if trace:
print(m2)
lp = LogicParser()
for fmla in formulas:
print(fmla)
lp.parse(fmla)
parsed = [lp.parse(fmla) for fmla in formulas]
for p in parsed:
g2.purge()
print("The satisfiers of '%s' are: %s" % (p, m2.satisfiers(p, 'x', g2, trace)))
def demo(num=0, trace=None):
"""
    Run some demos.
- num = 1: propositional logic demo
- num = 2: first order model demo (only if trace is set)
- num = 3: first order sentences demo
- num = 4: satisfaction of open formulas demo
- any other value: run all the demos
:param trace: trace = 1, or trace = 2 for more verbose tracing
"""
demos = {
1: propdemo,
2: folmodel,
3: foldemo,
4: satdemo}
try:
demos[num](trace=trace)
except KeyError:
for num in demos:
demos[num](trace=trace)
if __name__ == "__main__":
demo(2, trace=0)
| apache-2.0 |
caffeinehit/yell | yell/backends/celery.py | 1 | 2316 | from __future__ import absolute_import
from celery.task import Task
from yell import Notification, notify, registry
class CeleryNotificationTask(Task):
""" Dispatch and run the notification. """
def run(self, name=None, backend=None, *args, **kwargs):
"""
The Celery task.
Delivers the notification via all backends returned by :param:`backend`.
"""
assert name is not None, "No 'name' specified to notify"
assert backend is not None, "No 'backend' specified to notify with"
backends = backend().get_backends(*args, **kwargs)
notify(name, backends=backends, *args, **kwargs)
class CeleryNotification(Notification):
"""
Delivers notifications through Celery.
:example:
::
from yell import notify, Notification
class EmailNotification(Notification):
name = 'async'
def notify(self, *args, **kwargs):
# Deliver email
class DBNotification(Notification):
name = 'async'
def notify(self, *args, **kwargs):
# Save to database
class AsyncNotification(CeleryNotification):
name = 'async'
notify('async', backends = [AsyncNotification],
text = "This notification is routed through Celery before being sent and saved")
In the above example when calling :attr:`yell.notify` will invoke ``EmailNotification`` and
``DBNotification`` once the task was delivered through Celery.
"""
name = None
"""
The name of this notification. Override in subclasses.
"""
def get_backends(self, *args, **kwargs):
"""
Return all backends the task should use to deliver notifications.
By default all backends with the same :attr:`name` except for subclasses
of :class:`CeleryNotifications` will be used.
"""
return filter(lambda cls: not issubclass(cls, self.__class__), registry.notifications[self.name])
def notify(self, *args, **kwargs):
"""
Dispatches the notification to Celery
"""
return CeleryNotificationTask.delay(name=self.name, backend=self.__class__, *args, **kwargs)
| mit |
TimYi/django | tests/one_to_one/models.py | 203 | 3343 | """
One-to-one relationships
To define a one-to-one relationship, use ``OneToOneField()``.
In this example, a ``Place`` optionally can be a ``Restaurant``.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return "%s the place" % self.name
@python_2_unicode_compatible
class Restaurant(models.Model):
place = models.OneToOneField(Place, models.CASCADE, primary_key=True)
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return "%s the restaurant" % self.place.name
@python_2_unicode_compatible
class Bar(models.Model):
place = models.OneToOneField(Place, models.CASCADE)
serves_cocktails = models.BooleanField(default=True)
def __str__(self):
return "%s the bar" % self.place.name
class UndergroundBar(models.Model):
place = models.OneToOneField(Place, models.SET_NULL, null=True)
serves_cocktails = models.BooleanField(default=True)
@python_2_unicode_compatible
class Waiter(models.Model):
restaurant = models.ForeignKey(Restaurant, models.CASCADE)
name = models.CharField(max_length=50)
def __str__(self):
return "%s the waiter at %s" % (self.name, self.restaurant)
@python_2_unicode_compatible
class Favorites(models.Model):
name = models.CharField(max_length=50)
restaurants = models.ManyToManyField(Restaurant)
def __str__(self):
return "Favorites for %s" % self.name
class ManualPrimaryKey(models.Model):
primary_key = models.CharField(max_length=10, primary_key=True)
name = models.CharField(max_length=50)
class RelatedModel(models.Model):
link = models.OneToOneField(ManualPrimaryKey, models.CASCADE)
name = models.CharField(max_length=50)
@python_2_unicode_compatible
class MultiModel(models.Model):
link1 = models.OneToOneField(Place, models.CASCADE)
link2 = models.OneToOneField(ManualPrimaryKey, models.CASCADE)
name = models.CharField(max_length=50)
def __str__(self):
return "Multimodel %s" % self.name
class Target(models.Model):
name = models.CharField(max_length=50)
class Pointer(models.Model):
other = models.OneToOneField(Target, models.CASCADE, primary_key=True)
class Pointer2(models.Model):
other = models.OneToOneField(Target, models.CASCADE, related_name='second_pointer')
class HiddenPointer(models.Model):
target = models.OneToOneField(Target, models.CASCADE, related_name='hidden+')
# Test related objects visibility.
class SchoolManager(models.Manager):
def get_queryset(self):
return super(SchoolManager, self).get_queryset().filter(is_public=True)
class School(models.Model):
is_public = models.BooleanField(default=False)
objects = SchoolManager()
class DirectorManager(models.Manager):
def get_queryset(self):
return super(DirectorManager, self).get_queryset().filter(is_temp=False)
class Director(models.Model):
is_temp = models.BooleanField(default=False)
school = models.OneToOneField(School, models.CASCADE)
objects = DirectorManager()
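# Usage sketch (illustrative values; the reverse one-to-one accessor is
# standard Django behaviour):
#   p = Place.objects.create(name='Demon Dogs', address='944 W. Fullerton')
#   r = Restaurant.objects.create(place=p, serves_hot_dogs=True)
#   p.restaurant == r   # reverse accessor named after the related model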
| bsd-3-clause |
CiNC0/Cartier | cartier-python-resign-linux/tests/test_versioning.py | 1 | 1194 | #!/usr/bin/env python
import os.path
import importlib
import unittest
tests_dir = os.path.abspath(os.path.dirname(__file__))
package_name = tests_dir.split(os.path.sep)[-2].replace('-', '_')
package = importlib.import_module(package_name)
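# Example: for a checkout at .../cartier-python-resign-linux/tests, the lines
# above derive package_name == 'cartier_python_resign_linux' and import that
# package so its version attributes can be inspected.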
class VersioningTestCase(unittest.TestCase):
def assert_proper_attribute(self, attribute):
try:
assert getattr(package, attribute), (
"{} improperly set".format(attribute))
except AttributeError:
assert False, "missing {}".format(attribute)
def test_version_attribute(self):
self.assert_proper_attribute("__version__")
# test major, minor, and patch are numbers
version_split = package.__version__.split(".")[:3]
assert version_split, "__version__ is not set"
for n in version_split:
try:
int(n)
except ValueError:
assert False, "'{}' is not an integer".format(n)
def test_commit_attribute(self):
self.assert_proper_attribute("__commit__")
def test_build_attribute(self):
self.assert_proper_attribute("__build__")
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ortylp/scipy | scipy/special/tests/test_basic.py | 4 | 122266 | # this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from __future__ import division, print_function, absolute_import
import itertools
import warnings
import numpy as np
from numpy import array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp, \
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_
from numpy.testing import assert_equal, assert_almost_equal, \
assert_array_equal, assert_array_almost_equal, assert_approx_equal, \
assert_, rand, dec, TestCase, run_module_suite, assert_allclose, \
assert_raises, assert_array_almost_equal_nulp
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk
from scipy.special._testutils import assert_tol_equal, with_special_errors, \
assert_func_equal
class TestCephes(TestCase):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
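        # Sanity check of the exact reference above: binom_int(5, 2) computes
        # num = 4*5 = 20 and den = 1*2 = 2, so binom_int(5, 2) == 10.0,
        # matching cephes.binom(5, 2).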
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
rtol=1e-13, atol=0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
assert_allclose(cephes.betaln(-100.3, 1e-200), cephes.gammaln(1e-200))
assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
rtol=1e-14, atol=0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
8.4231316935498957e-21, rtol=3e-12, atol=0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)
assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,
1.33520017e-08, 2.74909967e-08],
rtol=1e-6, atol=0)
assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_diric(self):
# Test behavior near multiples of 2pi. Regression test for issue
# described in gh-4001.
n_odd = [1, 5, 25]
x = np.array(2*np.pi + 5e-5).astype(np.float32)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
x = np.array(2*np.pi + 1e-15).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
if hasattr(np, 'float128'):
# No float128 available in 32-bit numpy
x = np.array(2*np.pi + 1e-12).astype(np.float128)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
n_even = [2, 4, 24]
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
# Test at some values not near a multiple of pi
x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
octave_result = [0.872677996249965, 0.539344662916632,
0.127322003750035, -0.206011329583298]
assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
def test_diric_broadcasting(self):
x = np.arange(5)
n = np.array([1, 3, 7])
assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
def test_fdtr(self):
assert_equal(cephes.fdtr(1,1,0),0.0)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1,1,0),1.0)
def test_fdtri(self):
# cephes.fdtri(1,1,0.5) #BUG: gives NaN, should be 1
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes.gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
assert_equal(cephes.log1p(0),0.0)
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
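            # mathieu_cem takes the angle in degrees; convert to radians for the expansion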
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
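            # z arrives in degrees, as for ce_smallq above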
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
        # Values from ACM TOMS 804 (derivative obtained by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def test_ncfdtridfd(self):
cephes.ncfdtridfd(1,0.5,0,1)
def __check_ncfdtridfn(self):
cephes.ncfdtridfn(1,0.5,0,1)
def __check_ncfdtrinc(self):
cephes.ncfdtrinc(1,0.5,0,1)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
val = cephes.pdtr(0, 1)
assert_almost_equal(val, np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtr([0, 1, 2], 0.0)
assert_array_equal(val, [1, 1, 1])
def test_pdtrc(self):
val = cephes.pdtrc(0, 1)
assert_almost_equal(val, 1 - np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtrc([0, 1, 2], 0.0)
assert_array_equal(val, [0, 0, 0])
def test_pdtri(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
cephes.pdtri(0.5,0.5)
def test_pdtrik(self):
k = cephes.pdtrik(0.5, 1)
assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
# Edge case: m = 0 or very small.
k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
assert_array_equal(k, np.zeros((3, 3)))
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
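        # cephes.round rounds halfway cases away from zero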
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
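        # limits: Si(x) -> +-pi/2 as x -> +-inf; Ci(-inf) is nan on the real axis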
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_zeta(self):
cephes.zeta(2,2)
def test_zetac(self):
assert_equal(cephes.zetac(0),-1.5)
def test_wofz(self):
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(TestCase):
def test_airy(self):
        # Check that the Airy function is accurate to 8 decimal places
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),10)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
def test_ai_zeros_big(self):
z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
ai_z, aip_z, _, _ = special.airy(z)
ai_zp, aip_zp, _, _ = special.airy(zp)
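        # amplitude envelopes of Ai and Ai' for z -> -inf: |Ai| ~ |z|**(-1/4), |Ai'| ~ |z|**(1/4)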
ai_envelope = 1/abs(z)**(1./4)
aip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
assert_allclose(aip_zx, aip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.1
assert_allclose(z[:6],
[-2.3381074105, -4.0879494441, -5.5205598281,
-6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
assert_allclose(zp[:6],
[-1.0187929716, -3.2481975822, -4.8200992112,
-6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
def test_bi_zeros_big(self):
z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
_, _, bi_z, bip_z = special.airy(z)
_, _, bi_zp, bip_zp = special.airy(zp)
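        # Bi and Bi' share the same |z|**(-1/4) and |z|**(1/4) envelopes as Ai and Ai'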
bi_envelope = 1/abs(z)**(1./4)
bip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
assert_allclose(bip_zx, bip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.2
assert_allclose(z[:6],
[-1.1737132227, -3.2710933028, -4.8307378417,
-6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
assert_allclose(zp[:6],
[-2.2944396826, -4.0731550891, -5.5123957297,
-6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
class TestAssocLaguerre(TestCase):
def test_assoc_laguerre(self):
a1 = special.genlaguerre(11,1)
a2 = special.assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = special.assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly(TestCase):
def test_besselpoly(self):
pass
class TestKelvin(TestCase):
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
# Abramowitz & Stegun, Table 9.12
bi = special.bei_zeros(5)
assert_array_almost_equal(bi,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),8)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli(TestCase):
def test_bernoulli(self):
brn = special.bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta(TestCase):
def test_beta(self):
bet = special.beta(2,4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = special.betaln(2,4)
bet = log(abs(special.beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = special.betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = special.betaincinv(2,4,.5)
comp = special.betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestCombinatorics(TestCase):
def test_comb(self):
assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
assert_almost_equal(special.comb(10, 3), 120.)
assert_equal(special.comb(10, 3, exact=True), 120)
assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
def test_comb_with_np_int64(self):
n = 70
k = 30
np_n = np.int64(n)
np_k = np.int64(k)
assert_equal(special.comb(np_n, np_k, exact=True),
special.comb(n, k, exact=True))
def test_comb_zeros(self):
assert_equal(special.comb(2, 3, exact=True), 0)
assert_equal(special.comb(-1, 3, exact=True), 0)
assert_equal(special.comb(2, -1, exact=True), 0)
assert_equal(special.comb(2, -1, exact=False), 0)
assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 120.])
def test_perm(self):
assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
assert_almost_equal(special.perm(10, 3), 720.)
assert_equal(special.perm(10, 3, exact=True), 720)
def test_perm_zeros(self):
assert_equal(special.perm(2, 3, exact=True), 0)
assert_equal(special.perm(-1, 3, exact=True), 0)
assert_equal(special.perm(2, -1, exact=True), 0)
assert_equal(special.perm(2, -1, exact=False), 0)
assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 720.])
class TestTrigonometric(TestCase):
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.cotdg(45), 1.0, 14)
assert_almost_equal(special.cotdg(-45), -1.0, 14)
assert_almost_equal(special.cotdg(90), 0.0, 14)
assert_almost_equal(special.cotdg(-90), 0.0, 14)
assert_almost_equal(special.cotdg(135), -1.0, 14)
assert_almost_equal(special.cotdg(-135), 1.0, 14)
assert_almost_equal(special.cotdg(225), 1.0, 14)
assert_almost_equal(special.cotdg(-225), -1.0, 14)
assert_almost_equal(special.cotdg(270), 0.0, 14)
assert_almost_equal(special.cotdg(-270), 0.0, 14)
assert_almost_equal(special.cotdg(315), -1.0, 14)
assert_almost_equal(special.cotdg(-315), 1.0, 14)
assert_almost_equal(special.cotdg(765), 1.0, 14)
def test_sinc(self):
# the sinc implementation and more extensive sinc tests are in numpy
assert_array_equal(special.sinc([0]), 1)
assert_equal(special.sinc(0.0), 1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg(TestCase):
def test_tandg(self):
tn = special.tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = special.tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = special.tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.tandg(0), 0.0, 14)
assert_almost_equal(special.tandg(45), 1.0, 14)
assert_almost_equal(special.tandg(-45), -1.0, 14)
assert_almost_equal(special.tandg(135), -1.0, 14)
assert_almost_equal(special.tandg(-135), 1.0, 14)
assert_almost_equal(special.tandg(180), 0.0, 14)
assert_almost_equal(special.tandg(-180), 0.0, 14)
assert_almost_equal(special.tandg(225), 1.0, 14)
assert_almost_equal(special.tandg(-225), -1.0, 14)
assert_almost_equal(special.tandg(315), -1.0, 14)
assert_almost_equal(special.tandg(-315), 1.0, 14)
class TestEllip(TestCase):
def test_ellipj_nan(self):
"""Regression test for #912."""
special.ellipj(0.5, np.nan)
def test_ellipj(self):
el = special.ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = special.ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
assert_equal(special.ellipkm1(0.0), np.inf)
assert_equal(special.ellipkm1(1.0), pi/2)
assert_equal(special.ellipkm1(np.inf), 0.0)
assert_equal(special.ellipkm1(np.nan), np.nan)
assert_equal(special.ellipkm1(-1), np.nan)
assert_allclose(special.ellipk(-10), 0.7908718902387385)
def test_ellipkinc(self):
elkinc = special.ellipkinc(pi/2,.2)
elk = special.ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = special.ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipkinc(pi/2, 2), np.nan)
assert_equal(special.ellipkinc(0, 0.5), 0.0)
assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
def test_ellipkinc_2(self):
# Regression test for gh-3550
# ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipkinc(phi, mvals)
assert_array_almost_equal_nulp(f, 1.0259330100195334 * np.ones_like(f), 1)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipkinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 5.1296650500976675 * np.ones_like(f1), 2)
def test_ellipkinc_singular(self):
# ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
xlog = np.logspace(-300, -17, 25)
xlin = np.linspace(1e-17, 0.1, 25)
xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
        assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
def test_ellipe(self):
ele = special.ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
assert_equal(special.ellipe(0.0), pi/2)
assert_equal(special.ellipe(1.0), 1.0)
assert_equal(special.ellipe(-np.inf), np.inf)
assert_equal(special.ellipe(np.nan), np.nan)
assert_equal(special.ellipe(2), np.nan)
assert_allclose(special.ellipe(-10), 3.6391380384177689)
def test_ellipeinc(self):
eleinc = special.ellipeinc(pi/2,.2)
ele = special.ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = special.ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipeinc(pi/2, 2), np.nan)
assert_equal(special.ellipeinc(0, 0.5), 0.0)
assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)
def test_ellipeinc_2(self):
# Regression test for gh-3550
# ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipeinc(phi, mvals)
assert_array_almost_equal_nulp(f, 0.84442884574781019 * np.ones_like(f), 2)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipeinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 3.3471442287390509 * np.ones_like(f1), 4)
class TestErf(TestCase):
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def _check_variant_func(self, func, other_func, rtol, atol=0):
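        # Helper: check func against a reference computed from other_func on
        # random heavy-tailed points, for both real and complex arguments.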
np.random.seed(1234)
n = 10000
x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
z = x + 1j*y
old_errors = np.seterr(all='ignore')
try:
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
finally:
np.seterr(**old_errors)
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
def test_erfcinv(self):
i = special.erfcinv(1)
        # Use assert_array_equal instead of assert_equal, so the comparison
        # of -0.0 and 0.0 doesn't fail.
assert_array_equal(i, 0)
def test_erfinv(self):
i = special.erfinv(0)
assert_equal(i,0)
def test_errprint(self):
a = special.errprint()
        b = 1-a  # a is the current state; 1-a inverts it
        c = special.errprint(b)  # setting a new state returns the previous one, 'a'
        assert_equal(a,c)
        d = special.errprint(a)  # restore the original state
        assert_equal(d,b)  # the state set above is returned
# assert_equal(d,1-a)
class TestEuler(TestCase):
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
eu2 = special.euler(2) # just checking segfaults
assert_almost_equal(eu0[0],1,8)
assert_almost_equal(eu2[2],-1,8)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
olderr = np.seterr(all='ignore')
try:
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
finally:
np.seterr(**olderr)
assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (special.expm1(2),special.expm1(3),special.expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFactorialFunctions(TestCase):
def test_factorial(self):
assert_array_almost_equal([6., 24., 120.],
special.factorial([3, 4, 5], exact=False))
assert_equal(special.factorial(5, exact=True), 120)
def test_factorial2(self):
assert_array_almost_equal([105., 384., 945.],
special.factorial2([7, 8, 9], exact=False))
assert_equal(special.factorial2(7, exact=True), 105)
def test_factorialk(self):
assert_equal(special.factorialk(5, 1, exact=True), 120)
assert_equal(special.factorialk(5, 3, exact=True), 10)
class TestFresnel(TestCase):
def test_fresnel(self):
frs = array(special.fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = special.fresnel_zeros(5)
assert_array_almost_equal(szo,
array([2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = special.fresnel(szo)[0]
vals2 = special.fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = special.fresnel_zeros(6)
frc = special.fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = special.fresnel_zeros(5)
frs = special.fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainc(self):
gama = special.gammainc(.5,.5)
assert_almost_equal(gama,.7,1)
def test_gammaincnan(self):
gama = special.gammainc(-1,1)
assert_(isnan(gama))
def test_gammainczero(self):
# bad arg but zero integration limit
gama = special.gammainc(-1,0)
assert_equal(gama,0.0)
def test_gammaincc(self):
gicc = special.gammaincc(.5,.5)
greal = 1 - special.gammainc(.5,.5)
assert_almost_equal(gicc,greal,8)
def test_gammainccnan(self):
gama = special.gammaincc(-1,1)
assert_(isnan(gama))
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
@with_special_errors
def test_975(self):
        # Regression test for ticket #975 -- switch point in algorithm.
        # Check that things work OK at the switch point, at the floats
        # immediately adjacent to it, and a bit further away.
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for xp in pts:
y = special.gammaincinv(.4, xp)
x = special.gammainc(0.4, y)
assert_tol_equal(x, xp, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_almost_equal(rgam,rlgam,8)
def test_infinity(self):
assert_(np.isinf(special.gamma(-1)))
assert_equal(special.rgamma(-1), 0)
class TestHankel(TestCase):
def test_negv1(self):
assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
def test_hankel1(self):
hank1 = special.hankel1(1,.1)
hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
def test_negv1e(self):
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = special.hankel1e(1,.1)
hankrle = special.hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
def test_negv2(self):
assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
def test_hankel2(self):
hank2 = special.hankel2(1,.1)
hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
def test_neg2e(self):
assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
def test_hankl2e(self):
hank2e = special.hankel2e(1,.1)
hankrl2e = special.hankel2e(1,.1)
assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper(TestCase):
def test_h1vp(self):
h1 = special.h1vp(1,.1)
h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = special.h2vp(1,.1)
h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
# scalar input
assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
# float input, expected values match mpmath
x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
1.37789689539747, 1.60373685288480])
assert_allclose(x, expected, rtol=1e-12)
# complex input
x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
assert_allclose(x, expected.astype(complex), rtol=1e-12)
# test broadcasting
x1 = [0.5, 1.5, 2.5]
x2 = [0, 1, 0.5]
x = special.hyp0f1(x1, x2)
expected = [1.0, 1.8134302039235093, 1.21482702689997]
assert_allclose(x, expected, rtol=1e-12)
x = special.hyp0f1(np.row_stack([x1] * 2), x2)
assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
assert_raises(ValueError, special.hyp0f1,
np.row_stack([x1] * 3), [0, 1])
def test_hyp1f1(self):
hyp1 = special.hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# http://projects.scipy.org/scipy/scipy/ticket/659
# reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
# produced with test_hyp1f1.nb
ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = special.hyp1f1(a,b,c)
assert_(abs(expected - result)/expected < 1e-4)
def test_hyp1f1_gh2957(self):
hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
assert_almost_equal(hyp1, hyp2, 12)
def test_hyp1f2(self):
pass
def test_hyp2f0(self):
pass
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
# negative integer a or b, with c-a-b integer and x > 0.9
[-2,3,1,0.95,0.715],
[2,-3,1,0.95,-0.007],
[-6,3,1,0.95,0.0000810625],
[2,-5,1,0.95,-0.000029375],
# huge negative integers
(10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
(10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
]
for i, (a, b, c, x, v) in enumerate(values):
cv = special.hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_hyp3f0(self):
pass
def test_hyperu(self):
val1 = special.hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = special.hyperu(a,b,z)
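        # reference via the standard connection formula between U(a,b,z) and 1F1 (A & S 13.1.3)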
hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
(special.gamma(1+a-b)*special.gamma(b)) -
z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
/ (special.gamma(a)*special.gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
def test_hyperu_gh2287(self):
assert_almost_equal(special.hyperu(1, 1.5, 20.2),
0.048360918656699191, 12)
class TestBessel(TestCase):
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
def test_negv_jv(self):
assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
def test_negv_jve(self):
assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_array_almost_equal(jn0,array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = special.jn_zeros(102,5)
assert_tol_equal(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_tol_equal(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_tol_equal(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
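            # Jn'(x) via the recurrence Jn' = (J_{n-1} - J_{n+1})/2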
return (jn(n-1,x) - jn(n+1,x))/2
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError("Invalid t return for nt=%d" % nt)
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = special.jnp_zeros(443,5)
assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = special.jvp(2,2)
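        # compare against the recurrence Jv'(z) = (J_{v-1}(z) - J_{v+1}(z))/2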
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*rand() - 1
b = 5*rand() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
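        # Kv'(z) = -K_{v+1}(z) + (v/z)*Kv(z)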
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) # this function (kvp) is broken
def test_kvp_n2(self):
v = 3.
z = 2.2
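        # Kv'' from the modified Bessel equation: z**2*K'' + z*K' - (z**2 + v**2)*K = 0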
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
        zall = r_[yo,zo]
        allval = r_[ypo,zpo]
        assert_array_almost_equal(abs(special.yv(0.0,zall)),0.0,11)
        assert_array_almost_equal(abs(special.yv(1,zall)-allval),0.0,11)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
an = special.yn_zeros(443,5)
assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
ao = special.ynp_zeros(43,5)
assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
def test_negv_yv(self):
assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
def test_negv_yve(self):
assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
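        # Yv satisfies the same derivative recurrence as Jv: Yv' = (Y_{v-1} - Y_{v+1})/2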
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
for v in [-120, -100.3, -20., -10., -1., -.5,
0., 1., 12.49, 120., 301]:
for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
700.6, 1300, 10003]:
yield v, z
# check half-integers; these are problematic points at least
# for cephes/iv
for v in 0.5 + arange(-60, 60):
yield v, 3.5
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
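            # Cephes real-argument value, AMOS complex-argument value, and the integer-order routine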
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_tol_equal(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
olderr = np.seterr(all='ignore')
try:
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
finally:
np.seterr(**olderr)
@dec.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(int)
old_err = np.seterr(all='ignore')
try:
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
finally:
np.seterr(**old_err)
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)
assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
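        # For reference, this evaluates the modified Bessel power series
        #   I_v(z) = sum_{k>=0} (z/2)**(v+2*k) / (k! * Gamma(v+k+1))
        # term by term in the log domain, to avoid overflow in the
        # individual terms before summing.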
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*rand()-0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
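        # The closed forms above are instances of the general expansion
        # (Abramowitz & Stegun):
        #   L_n^{(k)}(x) = sum_{i=0}^{n} (-1)**i * C(n+k, n-i) * x**i / i!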
# Base polynomials come from Abramowitz and Stegun
class TestLegendre(TestCase):
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c, [1])
assert_equal(leg1.c, [1,0])
assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)
assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0)
class TestLambda(TestCase):
def test_lmbda(self):
lam = special.lmbda(1,.1)
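        # Jahnke-Emden lambda: lambda_v(x) = Gamma(v+1)*J_v(x)/(x/2)**v,
        # hence lambda_0(x) = J_0(x) and lambda_1(x) = 2*J_1(x)/x below.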
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(TestCase):
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions(TestCase):
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z, 3)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]])),
7)
def test_clpmn_close_to_real_2(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x),
special.lpmv(m, n, x)]),
7)
def test_clpmn_close_to_real_3(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
for type in [2, 3]:
assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
lp = special.lpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
def test_deriv_clpmn(self):
# data inside and outside of the unit circle
zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
1+1j, -1+1j, -1-1j, 1-1j]
m = 2
n = 3
for type in [2, 3]:
for z in zvals:
for h in [1e-3, 1e-3j]:
approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
- special.clpmn(m, n, z-0.5*h, type)[0])/h
assert_allclose(special.clpmn(m, n, z, type)[1],
approx_derivative,
rtol=1e-4)
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
olderr = np.seterr(all='ignore')
try:
lp = special.lpmv(-1,-1,.001)
finally:
np.seterr(**olderr)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_gt1(self):
"""algorithm for real arguments changes at 1.0001
test against analytical result for m=2, n=1
"""
x0 = 1.0001
delta = 0.00002
for x in (x0-delta, x0+delta):
lq = special.lqmn(2, 1, x)[0][-1, -1]
expected = 2/(x*x-1)
assert_almost_equal(lq, expected)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(TestCase):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = special.mathieu_even_coef(2,5)
        # Q is not defined; the output is broken and the proper
        # coefficient reporting order cannot be determined, so nothing
        # is asserted here.
def test_mathieu_odd_coef(self):
# same problem as above
pass
class TestFresnelIntegral(TestCase):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(TestCase):
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(TestCase):
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
    def test_pbdv(self):
        # Smoke test only: derrl combines neighbouring orders of pbdv but
        # is never asserted against anything.
        pbv = special.pbdv(1,.2)
        derrl = .5*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
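        # At the origin, D_v(0) = 2**(v/2) * sqrt(pi) / Gamma((1-v)/2);
        # z below is this closed form evaluated at each eta.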
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
# Test polygamma(0, x) == psi(x)
x = [2, 3, 1.1e14]
assert_almost_equal(special.polygamma(0, x), special.psi(x))
# Test broadcasting
n = [0, 1, 2]
x = [0.5, 1.5, 2.5]
expected = [-1.9635100260214238, 0.93480220054467933,
-0.23620405164172739]
assert_almost_equal(special.polygamma(n, x), expected)
expected = np.row_stack([expected]*2)
assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
expected)
assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
expected)
class TestProCvSeq(TestCase):
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(TestCase):
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(TestCase):
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(TestCase):
def test_riccati_jn(self):
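        # Riccati-Bessel: S_n(x) = x*j_n(x), so S_n'(x) = j_n(x) + x*j_n'(x);
        # both reference tuples below are built from sph_jn this way.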
jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
ricjn = special.riccati_jn(1,.2)
assert_array_almost_equal(ricjn,jnrl,8)
def test_riccati_yn(self):
ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
ricyn = special.riccati_yn(1,.2)
assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
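        # Illustration of round-half-to-even on a conforming platform:
        #   special.round(10.5) -> 10.0 but special.round(11.5) -> 12.0,
        #   since ties go to the nearest even integer.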
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
def test_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
yield (assert_array_almost_equal, sh(0,0,0,0),
0.5/sqrt(pi))
yield (assert_array_almost_equal, sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
yield (assert_array_almost_equal, sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
yield (assert_array_almost_equal, sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
def test_sph_harm_ufunc_loop_selection():
# see https://github.com/scipy/scipy/issues/4895
dt = np.dtype(np.complex128)
assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt)
assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt)
class TestSpherical(TestCase):
def test_sph_harm(self):
# see test_sph_harm function
pass
def test_sph_in(self):
i1n = special.sph_in(1,.2)
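        # Derivative recurrence used for the reference values:
        #   i_0'(x) = i_1(x) and i_1'(x) = i_0(x) - (2/x)*i_1(x)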
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_inkn(self):
spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
inkn = r_[special.sph_inkn(1,.2)]
assert_array_almost_equal(inkn,spikn,10)
def test_sph_in_kn_order0(self):
x = 1.
sph_i0 = special.sph_in(0, x)
sph_i0_expected = np.array([np.sinh(x)/x,
np.cosh(x)/x-np.sinh(x)/x**2])
assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
sph_k0 = special.sph_kn(0, x)
sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
-0.5*pi*exp(-x)*(1/x+1/x**2)])
assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
sph_i0k0 = special.sph_inkn(0, x)
assert_array_almost_equal(r_[sph_i0+sph_k0],
r_[sph_i0k0],
10)
def test_sph_jn(self):
s1 = special.sph_jn(2,.2)
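        # Derivative recurrence used for the reference values:
        #   j_0'(x) = -j_1(x) and j_n'(x) = j_{n-1}(x) - ((n+1)/x)*j_n(x)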
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_jnyn(self):
jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition
jnyn1 = r_[special.sph_jnyn(1,.2)]
assert_array_almost_equal(jnyn1,jnyn,9)
def test_sph_kn(self):
kn = special.sph_kn(2,.2)
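        # Derivative recurrence used for the reference values:
        #   k_0'(x) = -k_1(x) and k_n'(x) = -k_{n-1}(x) - ((n+1)/x)*k_n(x)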
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
def test_sph_yn(self):
sy1 = special.sph_yn(2,.2)[0][2]
sy2 = special.sph_yn(0,.2)[0][0]
        sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3 # derivative via the recurrence y_1' = (y_0 - 2*y_2)/3
        assert_almost_equal(sy1,-377.52483,5) # reference values
        assert_almost_equal(sy2,-4.9003329,5)
        sy3 = special.sph_yn(1,.2)[1][1]
        assert_almost_equal(sy3,sphpy,4) # compare sph_yn's derivative output against the recurrence value
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
                assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err, err_msg=(v, z))
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_chi2c_smalldf():
assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
assert_allclose(special.agm(24, 6), 13.4581714817)
assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28)
def test_legacy():
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
# Legacy behavior: truncating arguments to integers
assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))
assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))
assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))
assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionWarning, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
def test_entr():
def xfunc(x):
if x < 0:
return -np.inf
else:
return -special.xlogy(x, x)
values = (0, 0.5, 1.0, np.inf)
signs = [-1, 1]
arr = []
for sgn, v in itertools.product(signs, values):
arr.append(sgn * v)
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z)
assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13)
def test_kl_div():
def xfunc(x, y):
if x < 0 or y < 0 or (y == 0 and x != 0):
# extension of natural domain to preserve convexity
return np.inf
elif np.isposinf(x) or np.isposinf(y):
# limits within the natural domain
return np.inf
elif x == 0:
return y
else:
return special.xlogy(x, x/y) - x + y
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)
def test_rel_entr():
def xfunc(x, y):
if x > 0 and y > 0:
return special.xlogy(x, x/y)
elif x == 0 and y >= 0:
return 0
else:
return np.inf
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13)
def test_huber():
assert_equal(special.huber(-1, 1.5), np.inf)
assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))
def xfunc(delta, r):
if delta < 0:
return np.inf
elif np.abs(r) < delta:
return 0.5 * np.square(r)
else:
return delta * (np.abs(r) - 0.5 * delta)
z = np.random.randn(10, 2)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
def xfunc(delta, r):
if delta < 0:
return np.inf
elif (not delta) or (not r):
return 0
else:
return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)
z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
acogdev/ansible | test/integration/setup_gce.py | 163 | 1391 | '''
Create GCE resources for use in integration tests.
Takes a prefix as a command-line argument and creates two persistent disks named
${prefix}-base and ${prefix}-extra, plus a snapshot of the base disk named
${prefix}-snapshot. The prefix is forced to lowercase, to ensure the names are
legal GCE resource names.
'''
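# Example invocation (the credential options are whatever
# gce_credentials.add_credentials_options registers; the prefix is the only
# positional argument):
#   python setup_gce.py <credential options> ci-run-1
# which creates disks ci-run-1-base and ci-run-1-extra plus the snapshot
# ci-run-1-snapshot in zone us-central1-a.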
import sys
import optparse
import gce_credentials
def parse_args():
parser = optparse.OptionParser(
usage="%s [options] <prefix>" % (sys.argv[0],), description=__doc__)
gce_credentials.add_credentials_options(parser)
parser.add_option("--prefix",
action="store", dest="prefix",
help="String used to prefix GCE resource names (default: %default)")
(opts, args) = parser.parse_args()
gce_credentials.check_required(opts, parser)
if not args:
parser.error("Missing required argument: name prefix")
return (opts, args)
if __name__ == '__main__':
(opts, args) = parse_args()
gce = gce_credentials.get_gce_driver(opts)
prefix = args[0].lower()
try:
base_volume = gce.create_volume(
size=10, name=prefix+'-base', location='us-central1-a')
gce.create_volume_snapshot(base_volume, name=prefix+'-snapshot')
gce.create_volume(
size=10, name=prefix+'-extra', location='us-central1-a')
    except KeyboardInterrupt:
print("\nExiting on user command.")
| gpl-3.0 |
joequery/django | tests/admin_widgets/models.py | 227 | 4760 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class MyFileField(models.FileField):
pass
@python_2_unicode_compatible
class Member(models.Model):
name = models.CharField(max_length=100)
birthdate = models.DateTimeField(blank=True, null=True)
gender = models.CharField(max_length=1, blank=True, choices=[('M', 'Male'), ('F', 'Female')])
email = models.EmailField(blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Band(models.Model):
name = models.CharField(max_length=100)
style = models.CharField(max_length=20)
members = models.ManyToManyField(Member)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Album(models.Model):
band = models.ForeignKey(Band, models.CASCADE)
name = models.CharField(max_length=100)
cover_art = models.FileField(upload_to='albums')
backside_art = MyFileField(upload_to='albums_back', null=True)
def __str__(self):
return self.name
class HiddenInventoryManager(models.Manager):
def get_queryset(self):
return super(HiddenInventoryManager, self).get_queryset().filter(hidden=False)
@python_2_unicode_compatible
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', models.SET_NULL, to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
hidden = models.BooleanField(default=False)
# see #9258
default_manager = models.Manager()
objects = HiddenInventoryManager()
def __str__(self):
return self.name
class Event(models.Model):
main_band = models.ForeignKey(
Band,
models.CASCADE,
limit_choices_to=models.Q(pk__gt=0),
related_name='events_main_band_at',
)
supporting_bands = models.ManyToManyField(Band, blank=True, related_name='events_supporting_band_at')
start_date = models.DateField(blank=True, null=True)
start_time = models.TimeField(blank=True, null=True)
description = models.TextField(blank=True)
link = models.URLField(blank=True)
min_age = models.IntegerField(blank=True, null=True)
@python_2_unicode_compatible
class Car(models.Model):
owner = models.ForeignKey(User, models.CASCADE)
make = models.CharField(max_length=30)
model = models.CharField(max_length=30)
def __str__(self):
return "%s %s" % (self.make, self.model)
class CarTire(models.Model):
"""
    A single car tire. This is used to test that a user can only select their own cars.
"""
car = models.ForeignKey(Car, models.CASCADE)
class Honeycomb(models.Model):
location = models.CharField(max_length=20)
class Bee(models.Model):
"""
A model with a FK to a model that won't be registered with the admin
(Honeycomb) so the corresponding raw ID widget won't have a magnifying
glass link to select related honeycomb instances.
"""
honeycomb = models.ForeignKey(Honeycomb, models.CASCADE)
class Individual(models.Model):
"""
A model with a FK to itself. It won't be registered with the admin, so the
corresponding raw ID widget won't have a magnifying glass link to select
related instances (rendering will be called programmatically in this case).
"""
name = models.CharField(max_length=20)
parent = models.ForeignKey('self', models.SET_NULL, null=True)
soulmate = models.ForeignKey('self', models.CASCADE, null=True, related_name='soulmates')
class Company(models.Model):
name = models.CharField(max_length=20)
class Advisor(models.Model):
"""
A model with a m2m to a model that won't be registered with the admin
(Company) so the corresponding raw ID widget won't have a magnifying
glass link to select related company instances.
"""
name = models.CharField(max_length=20)
companies = models.ManyToManyField(Company)
@python_2_unicode_compatible
class Student(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
@python_2_unicode_compatible
class School(models.Model):
name = models.CharField(max_length=255)
students = models.ManyToManyField(Student, related_name='current_schools')
alumni = models.ManyToManyField(Student, related_name='previous_schools')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Profile(models.Model):
user = models.ForeignKey('auth.User', models.CASCADE, to_field='username')
def __str__(self):
return self.user.username
| bsd-3-clause |
andykimpe/chromium-test-npapi | tools/telemetry/telemetry/core/backends/webdriver/webdriver_tab_list_backend.py | 13 | 1386 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.webdriver import webdriver_tab_backend
class WebDriverTabListBackend(object):
def __init__(self, browser_backend):
self._browser_backend = browser_backend
# Stores the window handles.
self._tab_list = []
self._UpdateTabList()
def New(self, timeout=None):
# Webdriver API doesn't support tab controlling.
raise NotImplementedError()
def __iter__(self):
self._UpdateTabList()
return self._tab_list.__iter__()
def __len__(self):
self._UpdateTabList()
return len(self._tab_list)
def __getitem__(self, index):
self._UpdateTabList()
if len(self._tab_list) <= index:
raise IndexError('list index out of range')
return self._tab_list[index]
def _UpdateTabList(self):
window_handles = self._browser_backend.driver.window_handles
old_tab_list = self._tab_list
self._tab_list = []
for window_handle in window_handles:
tab = None
for old_tab in old_tab_list:
if old_tab.window_handle == window_handle:
tab = old_tab
break
else:
tab = webdriver_tab_backend.WebDriverTabBackend(
self._browser_backend, window_handle)
self._tab_list.append(tab)
| bsd-3-clause |
zdary/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/interactive.py | 102 | 85840 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Acknowledgements:
# Nicolas Economou, for his command line debugger on which this is inspired.
# http://tinyurl.com/nicolaseconomou
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Interactive debugging console.
@group Debugging:
ConsoleDebugger
@group Exceptions:
CmdError
"""
from __future__ import with_statement
__revision__ = "$Id$"
__all__ = [ 'ConsoleDebugger', 'CmdError' ]
# TODO document this module with docstrings.
# TODO command to set a last error breakpoint.
# TODO command to show available plugins.
from winappdbg import win32
from winappdbg import compat
from winappdbg.system import System
from winappdbg.util import PathOperations
from winappdbg.event import EventHandler, NoEvent
from winappdbg.textio import HexInput, HexOutput, HexDump, CrashDump, DebugLog
import os
import sys
import code
import time
import warnings
import traceback
# too many variables named "cmd" to have a module by the same name :P
from cmd import Cmd
# lazy imports
readline = None
#==============================================================================
class DummyEvent (NoEvent):
"Dummy event object used internally by L{ConsoleDebugger}."
def get_pid(self):
return self._pid
def get_tid(self):
return self._tid
def get_process(self):
return self._process
def get_thread(self):
return self._thread
#==============================================================================
class CmdError (Exception):
"""
Exception raised when a command parsing error occurs.
Used internally by L{ConsoleDebugger}.
"""
#==============================================================================
class ConsoleDebugger (Cmd, EventHandler):
"""
Interactive console debugger.
@see: L{Debug.interactive}
"""
#------------------------------------------------------------------------------
# Class variables
# Exception to raise when an error occurs executing a command.
command_error_exception = CmdError
# Milliseconds to wait for debug events in the main loop.
dwMilliseconds = 100
# History file name.
history_file = '.winappdbg_history'
# Confirm before quitting?
confirm_quit = True
# Valid plugin name characters.
    valid_plugin_name_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
                              'abcdefghijklmnopqrstuvwxyz' \
                              '0123456789' \
                              '_'
# Names of the registers.
segment_names = ( 'cs', 'ds', 'es', 'fs', 'gs' )
register_alias_64_to_32 = {
'eax':'Rax', 'ebx':'Rbx', 'ecx':'Rcx', 'edx':'Rdx',
'eip':'Rip', 'ebp':'Rbp', 'esp':'Rsp', 'esi':'Rsi', 'edi':'Rdi'
}
register_alias_64_to_16 = { 'ax':'Rax', 'bx':'Rbx', 'cx':'Rcx', 'dx':'Rdx' }
register_alias_64_to_8_low = { 'al':'Rax', 'bl':'Rbx', 'cl':'Rcx', 'dl':'Rdx' }
register_alias_64_to_8_high = { 'ah':'Rax', 'bh':'Rbx', 'ch':'Rcx', 'dh':'Rdx' }
register_alias_32_to_16 = { 'ax':'Eax', 'bx':'Ebx', 'cx':'Ecx', 'dx':'Edx' }
register_alias_32_to_8_low = { 'al':'Eax', 'bl':'Ebx', 'cl':'Ecx', 'dl':'Edx' }
register_alias_32_to_8_high = { 'ah':'Eax', 'bh':'Ebx', 'ch':'Ecx', 'dh':'Edx' }
register_aliases_full_32 = list(segment_names)
register_aliases_full_32.extend(compat.iterkeys(register_alias_32_to_16))
register_aliases_full_32.extend(compat.iterkeys(register_alias_32_to_8_low))
register_aliases_full_32.extend(compat.iterkeys(register_alias_32_to_8_high))
register_aliases_full_32 = tuple(register_aliases_full_32)
register_aliases_full_64 = list(segment_names)
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_32))
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_16))
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_8_low))
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_8_high))
register_aliases_full_64 = tuple(register_aliases_full_64)
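    # Example of the aliasing these tables implement: on amd64, "eax" reads
    # the low 32 bits of Rax and "ah" reads bits 8-15 of Rax (see
    # input_register below).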
# Names of the control flow instructions.
jump_instructions = (
'jmp', 'jecxz', 'jcxz',
'ja', 'jnbe', 'jae', 'jnb', 'jb', 'jnae', 'jbe', 'jna', 'jc', 'je',
'jz', 'jnc', 'jne', 'jnz', 'jnp', 'jpo', 'jp', 'jpe', 'jg', 'jnle',
'jge', 'jnl', 'jl', 'jnge', 'jle', 'jng', 'jno', 'jns', 'jo', 'js'
)
call_instructions = ( 'call', 'ret', 'retn' )
loop_instructions = ( 'loop', 'loopz', 'loopnz', 'loope', 'loopne' )
control_flow_instructions = call_instructions + loop_instructions + \
jump_instructions
#------------------------------------------------------------------------------
# Instance variables
def __init__(self):
"""
Interactive console debugger.
@see: L{Debug.interactive}
"""
Cmd.__init__(self)
EventHandler.__init__(self)
# Quit the debugger when True.
self.debuggerExit = False
# Full path to the history file.
self.history_file_full_path = None
# Last executed command.
self.__lastcmd = ""
#------------------------------------------------------------------------------
# Debugger
# Use this Debug object.
def start_using_debugger(self, debug):
# Clear the previous Debug object.
self.stop_using_debugger()
# Keep the Debug object.
self.debug = debug
# Set ourselves as the event handler for the debugger.
self.prevHandler = debug.set_event_handler(self)
# Stop using the Debug object given by start_using_debugger().
# Circular references must be removed, or the destructors never get called.
def stop_using_debugger(self):
if hasattr(self, 'debug'):
debug = self.debug
debug.set_event_handler(self.prevHandler)
del self.prevHandler
del self.debug
return debug
return None
# Destroy the Debug object.
def destroy_debugger(self, autodetach = True):
debug = self.stop_using_debugger()
if debug is not None:
if not autodetach:
debug.kill_all(bIgnoreExceptions=True)
debug.lastEvent = None
debug.stop()
del debug
@property
def lastEvent(self):
return self.debug.lastEvent
def set_fake_last_event(self, process):
if self.lastEvent is None:
self.debug.lastEvent = DummyEvent(self.debug)
self.debug.lastEvent._process = process
self.debug.lastEvent._thread = process.get_thread(
process.get_thread_ids()[0])
self.debug.lastEvent._pid = process.get_pid()
self.debug.lastEvent._tid = self.lastEvent._thread.get_tid()
#------------------------------------------------------------------------------
# Input
# TODO
# * try to guess breakpoints when insufficient data is given
# * child Cmd instances will have to be used for other prompts, for example
# when assembling or editing memory - it may also be a good idea to think
# if it's possible to make the main Cmd instance also a child, instead of
# the debugger itself - probably the same goes for the EventHandler, maybe
# it can be used as a contained object rather than a parent class.
# Join a token list into an argument string.
def join_tokens(self, token_list):
return self.debug.system.argv_to_cmdline(token_list)
# Split an argument string into a token list.
def split_tokens(self, arg, min_count = 0, max_count = None):
token_list = self.debug.system.cmdline_to_argv(arg)
if len(token_list) < min_count:
raise CmdError("missing parameters.")
if max_count and len(token_list) > max_count:
raise CmdError("too many parameters.")
return token_list
# Token is a thread ID or name.
def input_thread(self, token):
targets = self.input_thread_list( [token] )
if len(targets) == 0:
raise CmdError("missing thread name or ID")
if len(targets) > 1:
msg = "more than one thread with that name:\n"
for tid in targets:
msg += "\t%d\n" % tid
msg = msg[:-len("\n")]
raise CmdError(msg)
return targets[0]
# Token list is a list of thread IDs or names.
def input_thread_list(self, token_list):
targets = set()
system = self.debug.system
for token in token_list:
try:
tid = self.input_integer(token)
if not system.has_thread(tid):
raise CmdError("thread not found (%d)" % tid)
targets.add(tid)
except ValueError:
found = set()
for process in system.iter_processes():
found.update( system.find_threads_by_name(token) )
if not found:
raise CmdError("thread not found (%s)" % token)
for thread in found:
targets.add( thread.get_tid() )
targets = list(targets)
targets.sort()
return targets
# Token is a process ID or name.
def input_process(self, token):
targets = self.input_process_list( [token] )
if len(targets) == 0:
raise CmdError("missing process name or ID")
if len(targets) > 1:
msg = "more than one process with that name:\n"
for pid in targets:
msg += "\t%d\n" % pid
msg = msg[:-len("\n")]
raise CmdError(msg)
return targets[0]
# Token list is a list of process IDs or names.
def input_process_list(self, token_list):
targets = set()
system = self.debug.system
for token in token_list:
try:
pid = self.input_integer(token)
if not system.has_process(pid):
raise CmdError("process not found (%d)" % pid)
targets.add(pid)
except ValueError:
found = system.find_processes_by_filename(token)
if not found:
raise CmdError("process not found (%s)" % token)
for (process, _) in found:
targets.add( process.get_pid() )
targets = list(targets)
targets.sort()
return targets
# Token is a command line to execute.
def input_command_line(self, command_line):
argv = self.debug.system.cmdline_to_argv(command_line)
if not argv:
raise CmdError("missing command line to execute")
fname = argv[0]
if not os.path.exists(fname):
try:
fname, _ = win32.SearchPath(None, fname, '.exe')
except WindowsError:
raise CmdError("file not found: %s" % fname)
argv[0] = fname
command_line = self.debug.system.argv_to_cmdline(argv)
return command_line
# Token is an integer.
# Only hexadecimal format is supported.
def input_hexadecimal_integer(self, token):
return int(token, 0x10)
# Token is an integer.
# It can be in any supported format.
def input_integer(self, token):
return HexInput.integer(token)
## input_integer = input_hexadecimal_integer
# Token is an address.
# The address can be a integer, a label or a register.
def input_address(self, token, pid = None, tid = None):
address = None
if self.is_register(token):
if tid is None:
if self.lastEvent is None or pid != self.lastEvent.get_pid():
msg = "can't resolve register (%s) for unknown thread"
raise CmdError(msg % token)
tid = self.lastEvent.get_tid()
address = self.input_register(token, tid)
if address is None:
try:
address = self.input_hexadecimal_integer(token)
except ValueError:
if pid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
process = self.lastEvent.get_process()
elif self.lastEvent is not None and pid == self.lastEvent.get_pid():
process = self.lastEvent.get_process()
else:
try:
process = self.debug.system.get_process(pid)
except KeyError:
raise CmdError("process not found (%d)" % pid)
try:
address = process.resolve_label(token)
except Exception:
raise CmdError("unknown address (%s)" % token)
return address
# Token is an address range, or a single address.
# The addresses can be integers, labels or registers.
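    # Accepted forms: two tokens ("<address> <size>") or a single token
    # with a dash ("<start>-<end>"), e.g. "401000 100" or "401000-401100".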
def input_address_range(self, token_list, pid = None, tid = None):
if len(token_list) == 2:
token_1, token_2 = token_list
address = self.input_address(token_1, pid, tid)
try:
size = self.input_integer(token_2)
except ValueError:
raise CmdError("bad address range: %s %s" % (token_1, token_2))
elif len(token_list) == 1:
token = token_list[0]
if '-' in token:
try:
token_1, token_2 = token.split('-')
except Exception:
raise CmdError("bad address range: %s" % token)
address = self.input_address(token_1, pid, tid)
size = self.input_address(token_2, pid, tid) - address
else:
address = self.input_address(token, pid, tid)
size = None
return address, size
# XXX TODO
# Support non-integer registers here.
def is_register(self, token):
if win32.arch == 'i386':
if token in self.register_aliases_full_32:
return True
token = token.title()
for (name, typ) in win32.CONTEXT._fields_:
if name == token:
return win32.sizeof(typ) == win32.sizeof(win32.DWORD)
elif win32.arch == 'amd64':
if token in self.register_aliases_full_64:
return True
token = token.title()
for (name, typ) in win32.CONTEXT._fields_:
if name == token:
return win32.sizeof(typ) == win32.sizeof(win32.DWORD64)
return False
# The token is a register name.
# Returns None if no register name is matched.
def input_register(self, token, tid = None):
if tid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
thread = self.lastEvent.get_thread()
else:
thread = self.debug.system.get_thread(tid)
ctx = thread.get_context()
token = token.lower()
title = token.title()
if title in ctx:
return ctx.get(title) # eax -> Eax
if ctx.arch == 'i386':
if token in self.segment_names:
return ctx.get( 'Seg%s' % title ) # cs -> SegCs
if token in self.register_alias_32_to_16:
return ctx.get( self.register_alias_32_to_16[token] ) & 0xFFFF
if token in self.register_alias_32_to_8_low:
return ctx.get( self.register_alias_32_to_8_low[token] ) & 0xFF
if token in self.register_alias_32_to_8_high:
return (ctx.get( self.register_alias_32_to_8_high[token] ) & 0xFF00) >> 8
elif ctx.arch == 'amd64':
if token in self.segment_names:
return ctx.get( 'Seg%s' % title ) # cs -> SegCs
if token in self.register_alias_64_to_32:
return ctx.get( self.register_alias_64_to_32[token] ) & 0xFFFFFFFF
if token in self.register_alias_64_to_16:
return ctx.get( self.register_alias_64_to_16[token] ) & 0xFFFF
if token in self.register_alias_64_to_8_low:
return ctx.get( self.register_alias_64_to_8_low[token] ) & 0xFF
if token in self.register_alias_64_to_8_high:
return (ctx.get( self.register_alias_64_to_8_high[token] ) & 0xFF00) >> 8
return None
# Token list contains an address or address range.
# The prefix is also parsed looking for process and thread IDs.
def input_full_address_range(self, token_list):
pid, tid = self.get_process_and_thread_ids_from_prefix()
address, size = self.input_address_range(token_list, pid, tid)
return pid, tid, address, size
# Token list contains a breakpoint.
def input_breakpoint(self, token_list):
pid, tid, address, size = self.input_full_address_range(token_list)
if not self.debug.is_debugee(pid):
raise CmdError("target process is not being debugged")
return pid, tid, address, size
# Token list contains a memory address, and optional size and process.
# Sets the results as the default for the next display command.
def input_display(self, token_list, default_size = 64):
pid, tid, address, size = self.input_full_address_range(token_list)
if not size:
size = default_size
next_address = HexOutput.integer(address + size)
self.default_display_target = next_address
return pid, tid, address, size
#------------------------------------------------------------------------------
# Output
# Tell the user a module was loaded.
def print_module_load(self, event):
mod = event.get_module()
base = mod.get_base()
name = mod.get_filename()
if not name:
name = ''
msg = "Loaded module (%s) %s"
msg = msg % (HexDump.address(base), name)
print(msg)
# Tell the user a module was unloaded.
def print_module_unload(self, event):
mod = event.get_module()
base = mod.get_base()
name = mod.get_filename()
if not name:
name = ''
msg = "Unloaded module (%s) %s"
msg = msg % (HexDump.address(base), name)
print(msg)
# Tell the user a process was started.
def print_process_start(self, event):
pid = event.get_pid()
start = event.get_start_address()
if start:
start = HexOutput.address(start)
print("Started process %d at %s" % (pid, start))
else:
print("Attached to process %d" % pid)
# Tell the user a thread was started.
def print_thread_start(self, event):
tid = event.get_tid()
start = event.get_start_address()
if start:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
start = event.get_process().get_label_at_address(start)
print("Started thread %d at %s" % (tid, start))
else:
print("Attached to thread %d" % tid)
# Tell the user a process has finished.
def print_process_end(self, event):
pid = event.get_pid()
code = event.get_exit_code()
print("Process %d terminated, exit code %d" % (pid, code))
# Tell the user a thread has finished.
def print_thread_end(self, event):
tid = event.get_tid()
code = event.get_exit_code()
print("Thread %d terminated, exit code %d" % (tid, code))
    # Print debug strings.
def print_debug_string(self, event):
tid = event.get_tid()
string = event.get_debug_string()
print("Thread %d says: %r" % (tid, string))
# Inform the user of any other debugging event.
def print_event(self, event):
code = HexDump.integer( event.get_event_code() )
name = event.get_event_name()
desc = event.get_event_description()
if code in desc:
print('')
print("%s: %s" % (name, desc))
else:
print('')
print("%s (%s): %s" % (name, code, desc))
self.print_event_location(event)
# Stop on exceptions and prompt for commands.
def print_exception(self, event):
address = HexDump.address( event.get_exception_address() )
code = HexDump.integer( event.get_exception_code() )
desc = event.get_exception_description()
if event.is_first_chance():
chance = 'first'
else:
chance = 'second'
if code in desc:
msg = "%s at address %s (%s chance)" % (desc, address, chance)
else:
msg = "%s (%s) at address %s (%s chance)" % (desc, code, address, chance)
print('')
print(msg)
self.print_event_location(event)
# Show the current location in the code.
def print_event_location(self, event):
process = event.get_process()
thread = event.get_thread()
self.print_current_location(process, thread)
# Show the current location in the code.
def print_breakpoint_location(self, event):
process = event.get_process()
thread = event.get_thread()
pc = event.get_exception_address()
self.print_current_location(process, thread, pc)
# Show the current location in any process and thread.
def print_current_location(self, process = None, thread = None, pc = None):
if not process:
if self.lastEvent is None:
raise CmdError("no current process set")
process = self.lastEvent.get_process()
if not thread:
if self.lastEvent is None:
raise CmdError("no current process set")
thread = self.lastEvent.get_thread()
thread.suspend()
try:
if pc is None:
pc = thread.get_pc()
ctx = thread.get_context()
finally:
thread.resume()
label = process.get_label_at_address(pc)
try:
disasm = process.disassemble(pc, 15)
except WindowsError:
disasm = None
except NotImplementedError:
disasm = None
print('')
        print(CrashDump.dump_registers(ctx))
print("%s:" % label)
if disasm:
print(CrashDump.dump_code_line(disasm[0], pc, bShowDump = True))
else:
try:
data = process.peek(pc, 15)
except Exception:
data = None
if data:
print('%s: %s' % (HexDump.address(pc), HexDump.hexblock_byte(data)))
else:
print('%s: ???' % HexDump.address(pc))
# Display memory contents using a given method.
def print_memory_display(self, arg, method):
if not arg:
arg = self.default_display_target
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_display(token_list)
label = self.get_process(pid).get_label_at_address(address)
data = self.read_memory(address, size, pid)
if data:
print("%s:" % label)
            print(method(data, address))
#------------------------------------------------------------------------------
# Debugging
# Get the process ID from the prefix or the last event.
def get_process_id_from_prefix(self):
if self.cmdprefix:
pid = self.input_process(self.cmdprefix)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
pid = self.lastEvent.get_pid()
return pid
# Get the thread ID from the prefix or the last event.
def get_thread_id_from_prefix(self):
if self.cmdprefix:
tid = self.input_thread(self.cmdprefix)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
tid = self.lastEvent.get_tid()
return tid
# Get the process from the prefix or the last event.
def get_process_from_prefix(self):
pid = self.get_process_id_from_prefix()
return self.get_process(pid)
# Get the thread from the prefix or the last event.
def get_thread_from_prefix(self):
tid = self.get_thread_id_from_prefix()
return self.get_thread(tid)
# Get the process and thread IDs from the prefix or the last event.
def get_process_and_thread_ids_from_prefix(self):
if self.cmdprefix:
try:
pid = self.input_process(self.cmdprefix)
tid = None
except CmdError:
try:
tid = self.input_thread(self.cmdprefix)
pid = self.debug.system.get_thread(tid).get_pid()
except CmdError:
msg = "unknown process or thread (%s)" % self.cmdprefix
raise CmdError(msg)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
pid = self.lastEvent.get_pid()
tid = self.lastEvent.get_tid()
return pid, tid
# Get the process and thread from the prefix or the last event.
def get_process_and_thread_from_prefix(self):
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
thread = self.get_thread(tid)
return process, thread
# Get the process object.
def get_process(self, pid = None):
if pid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
process = self.lastEvent.get_process()
elif self.lastEvent is not None and pid == self.lastEvent.get_pid():
process = self.lastEvent.get_process()
else:
try:
process = self.debug.system.get_process(pid)
except KeyError:
raise CmdError("process not found (%d)" % pid)
return process
# Get the thread object.
def get_thread(self, tid = None):
if tid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
thread = self.lastEvent.get_thread()
elif self.lastEvent is not None and tid == self.lastEvent.get_tid():
thread = self.lastEvent.get_thread()
else:
try:
thread = self.debug.system.get_thread(tid)
except KeyError:
raise CmdError("thread not found (%d)" % tid)
return thread
# Read the process memory.
def read_memory(self, address, size, pid = None):
process = self.get_process(pid)
try:
data = process.peek(address, size)
except WindowsError:
orig_address = HexOutput.integer(address)
next_address = HexOutput.integer(address + size)
msg = "error reading process %d, from %s to %s (%d bytes)"
msg = msg % (pid, orig_address, next_address, size)
raise CmdError(msg)
return data
# Write the process memory.
def write_memory(self, address, data, pid = None):
process = self.get_process(pid)
try:
process.write(address, data)
except WindowsError:
size = len(data)
orig_address = HexOutput.integer(address)
next_address = HexOutput.integer(address + size)
msg = "error reading process %d, from %s to %s (%d bytes)"
msg = msg % (pid, orig_address, next_address, size)
raise CmdError(msg)
# Change a register value.
def change_register(self, register, value, tid = None):
# Get the thread.
if tid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
thread = self.lastEvent.get_thread()
else:
try:
thread = self.debug.system.get_thread(tid)
except KeyError:
raise CmdError("thread not found (%d)" % tid)
# Convert the value to integer type.
try:
value = self.input_integer(value)
except ValueError:
pid = thread.get_pid()
value = self.input_address(value, pid, tid)
# Suspend the thread.
# The finally clause ensures the thread is resumed before returning.
thread.suspend()
try:
# Get the current context.
ctx = thread.get_context()
# Register name matching is case insensitive.
register = register.lower()
# Integer 32 bits registers.
if register in self.register_names:
register = register.title() # eax -> Eax
# Segment (16 bit) registers.
if register in self.segment_names:
register = 'Seg%s' % register.title() # cs -> SegCs
value = value & 0x0000FFFF
# Integer 16 bits registers.
if register in self.register_alias_16:
register = self.register_alias_16[register]
previous = ctx.get(register) & 0xFFFF0000
value = (value & 0x0000FFFF) | previous
# Integer 8 bits registers (low part).
if register in self.register_alias_8_low:
register = self.register_alias_8_low[register]
previous = ctx.get(register) % 0xFFFFFF00
value = (value & 0x000000FF) | previous
# Integer 8 bits registers (high part).
if register in self.register_alias_8_high:
register = self.register_alias_8_high[register]
                previous = ctx.get(register) & 0xFFFF00FF
value = ((value & 0x000000FF) << 8) | previous
# Set the new context.
ctx.__setitem__(register, value)
thread.set_context(ctx)
# Resume the thread.
finally:
thread.resume()
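    # Masking sketch for the register aliases above (values are hypothetical):
    #
    #     old_eax = 0xDEADBEEF
    #     new_ax  = 0x1234
    #     (old_eax & 0xFFFF0000) | new_ax            # ax -> 0xDEAD1234
    #     new_ah  = 0x56
    #     (old_eax & 0xFFFF00FF) | (new_ah << 8)     # ah -> 0xDEAD56EF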
# Very crude way to find data within the process memory.
# TODO: Perhaps pfind.py can be integrated here instead.
def find_in_memory(self, query, process):
for mbi in process.get_memory_map():
if mbi.State != win32.MEM_COMMIT or mbi.Protect & win32.PAGE_GUARD:
continue
address = mbi.BaseAddress
size = mbi.RegionSize
try:
data = process.read(address, size)
            except WindowsError:
                msg = "*** Warning: read error at address %s"
                msg = msg % HexDump.address(address)
                print(msg)
                continue
width = min(len(query), 16)
p = data.find(query)
while p >= 0:
q = p + len(query)
d = data[ p : min(q, p + width) ]
h = HexDump.hexline(d, width = width)
a = HexDump.address(address + p)
print("%s: %s" % (a, h))
p = data.find(query, q)
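    # Usage sketch (the query below is hypothetical):
    #
    #     process = self.get_process(pid)
    #     self.find_in_memory('MZ', process)   # prints the address and a
    #                                          # hex dump of every match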
# Kill a process.
def kill_process(self, pid):
process = self.debug.system.get_process(pid)
try:
process.kill()
if self.debug.is_debugee(pid):
self.debug.detach(pid)
print("Killed process (%d)" % pid)
except Exception:
print("Error trying to kill process (%d)" % pid)
# Kill a thread.
def kill_thread(self, tid):
thread = self.debug.system.get_thread(tid)
try:
thread.kill()
process = thread.get_process()
pid = process.get_pid()
if self.debug.is_debugee(pid) and not process.is_alive():
self.debug.detach(pid)
print("Killed thread (%d)" % tid)
except Exception:
print("Error trying to kill thread (%d)" % tid)
#------------------------------------------------------------------------------
# Command prompt input
# Prompt the user for commands.
def prompt_user(self):
while not self.debuggerExit:
try:
self.cmdloop()
break
except CmdError:
e = sys.exc_info()[1]
print("*** Error: %s" % str(e))
except Exception:
traceback.print_exc()
## self.debuggerExit = True
# Prompt the user for a YES/NO kind of question.
def ask_user(self, msg, prompt = "Are you sure? (y/N): "):
print(msg)
answer = raw_input(prompt)
answer = answer.strip()[:1].lower()
return answer == 'y'
# Autocomplete the given command when not ambiguous.
# Convert it to lowercase (so commands are seen as case insensitive).
def autocomplete(self, cmd):
cmd = cmd.lower()
completed = self.completenames(cmd)
if len(completed) == 1:
cmd = completed[0]
return cmd
# Get the help text for the given list of command methods.
# Note it's NOT a list of commands, but a list of actual method names.
# Each line of text is stripped and all lines are sorted.
# Repeated text lines are removed.
# Returns a single, possibly multiline, string.
def get_help(self, commands):
msg = set()
for name in commands:
if name != 'do_help':
try:
doc = getattr(self, name).__doc__.split('\n')
except Exception:
return ( "No help available when Python"
" is run with the -OO switch." )
for x in doc:
x = x.strip()
if x:
msg.add(' %s' % x)
msg = list(msg)
msg.sort()
msg = '\n'.join(msg)
return msg
# Parse the prefix and remove it from the command line.
def split_prefix(self, line):
prefix = None
if line.startswith('~'):
pos = line.find(' ')
if pos == 1:
pos = line.find(' ', pos + 1)
            if pos >= 0:
prefix = line[ 1 : pos ].strip()
line = line[ pos : ].strip()
return prefix, line
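    # Parsing sketch (hypothetical input): "~1234 k" yields the prefix
    # "1234" and the remaining command line "k":
    #
    #     prefix, line = self.split_prefix('~1234 k')
    #     # prefix == '1234', line == 'k'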
#------------------------------------------------------------------------------
# Cmd() hacks
# Header for help page.
doc_header = 'Available commands (type help * or help <command>)'
## # Read and write directly to stdin and stdout.
## # This prevents the use of raw_input and print.
## use_rawinput = False
@property
def prompt(self):
if self.lastEvent:
pid = self.lastEvent.get_pid()
tid = self.lastEvent.get_tid()
if self.debug.is_debugee(pid):
## return '~%d(%d)> ' % (tid, pid)
return '%d:%d> ' % (pid, tid)
return '> '
# Return a sorted list of method names.
# Only returns the methods that implement commands.
def get_names(self):
names = Cmd.get_names(self)
names = [ x for x in set(names) if x.startswith('do_') ]
names.sort()
return names
# Automatically autocomplete commands, even if Tab wasn't pressed.
# The prefix is removed from the line and stored in self.cmdprefix.
# Also implement the commands that consist of a symbol character.
def parseline(self, line):
self.cmdprefix, line = self.split_prefix(line)
line = line.strip()
if line:
if line[0] == '.':
line = 'plugin ' + line[1:]
elif line[0] == '#':
line = 'python ' + line[1:]
cmd, arg, line = Cmd.parseline(self, line)
if cmd:
cmd = self.autocomplete(cmd)
return cmd, arg, line
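    # Rewrite sketch for the symbol commands above (hypothetical inputs):
    #
    #     '.example foo'   ->  'plugin example foo'
    #     '#print "hi"'    ->  'python print "hi"'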
## # Don't repeat the last executed command.
## def emptyline(self):
## pass
# Reset the defaults for some commands.
def preloop(self):
self.default_disasm_target = 'eip'
self.default_display_target = 'eip'
self.last_display_command = self.do_db
# Put the prefix back in the command line.
def get_lastcmd(self):
return self.__lastcmd
def set_lastcmd(self, lastcmd):
if self.cmdprefix:
lastcmd = '~%s %s' % (self.cmdprefix, lastcmd)
self.__lastcmd = lastcmd
lastcmd = property(get_lastcmd, set_lastcmd)
# Quit the command prompt if the debuggerExit flag is on.
def postcmd(self, stop, line):
return stop or self.debuggerExit
#------------------------------------------------------------------------------
# Commands
    # Each command contains a docstring with its help text.
    # The help text consists of independent text lines,
    # where each line shows a command and its parameters.
    # Each command method has the help message for itself and all its aliases.
# Only the docstring for the "help" command is shown as-is.
# NOTE: Command methods MUST be all lowercase!
# Extended help command.
def do_help(self, arg):
"""
? - show the list of available commands
? * - show help for all commands
? <command> [command...] - show help for the given command(s)
help - show the list of available commands
help * - show help for all commands
help <command> [command...] - show help for the given command(s)
"""
if not arg:
Cmd.do_help(self, arg)
elif arg in ('?', 'help'):
# An easter egg :)
print(" Help! I need somebody...")
print(" Help! Not just anybody...")
print(" Help! You know, I need someone...")
print(" Heeelp!")
else:
if arg == '*':
commands = self.get_names()
commands = [ x for x in commands if x.startswith('do_') ]
else:
commands = set()
for x in arg.split(' '):
x = x.strip()
if x:
for n in self.completenames(x):
commands.add( 'do_%s' % n )
commands = list(commands)
commands.sort()
print(self.get_help(commands))
def do_shell(self, arg):
"""
! - spawn a system shell
shell - spawn a system shell
! <command> [arguments...] - execute a single shell command
shell <command> [arguments...] - execute a single shell command
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
# Try to use the environment to locate cmd.exe.
# If not found, it's usually OK to just use the filename,
# since cmd.exe is one of those "magic" programs that
# can be automatically found by CreateProcess.
shell = os.getenv('ComSpec', 'cmd.exe')
# When given a command, run it and return.
# When no command is given, spawn a shell.
if arg:
arg = '%s /c %s' % (shell, arg)
else:
arg = shell
process = self.debug.system.start_process(arg, bConsole = True)
process.wait()
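    # For example (hypothetical command), "! dir C:\" would spawn
    # "cmd.exe /c dir C:\" in a new console and wait for it to finish.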
# This hack fixes a bug in Python, the interpreter console is closing the
# stdin pipe when calling the exit() function (Ctrl+Z seems to work fine).
class _PythonExit(object):
def __repr__(self):
return "Use exit() or Ctrl-Z plus Return to exit"
def __call__(self):
raise SystemExit()
_python_exit = _PythonExit()
# Spawns a Python shell with some handy local variables and the winappdbg
# module already imported. Also the console banner is improved.
def _spawn_python_shell(self, arg):
import winappdbg
banner = ('Python %s on %s\nType "help", "copyright", '
'"credits" or "license" for more information.\n')
platform = winappdbg.version.lower()
platform = 'WinAppDbg %s' % platform
banner = banner % (sys.version, platform)
local = {}
local.update(__builtins__)
local.update({
'__name__' : '__console__',
'__doc__' : None,
'exit' : self._python_exit,
'self' : self,
'arg' : arg,
'winappdbg' : winappdbg,
})
try:
code.interact(banner=banner, local=local)
except SystemExit:
# We need to catch it so it doesn't kill our program.
pass
def do_python(self, arg):
"""
# - spawn a python interpreter
python - spawn a python interpreter
# <statement> - execute a single python statement
python <statement> - execute a single python statement
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
# When given a Python statement, execute it directly.
if arg:
try:
compat.exec_(arg, globals(), locals())
except Exception:
traceback.print_exc()
# When no statement is given, spawn a Python interpreter.
else:
try:
self._spawn_python_shell(arg)
except Exception:
e = sys.exc_info()[1]
raise CmdError(
"unhandled exception when running Python console: %s" % e)
# The plugins interface is quite simple.
#
# Just place a .py file with the plugin name in the "plugins" folder,
# for example "do_example.py" would implement the "example" command.
#
# The plugin must have a function named "do", which implements the
# command functionality exactly like the do_* methods of Cmd instances.
#
# The docstring for the "do" function will be parsed exactly like
# one of the debugger's commands - that is, each line is treated
# independently.
#
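    # A minimal plugin sketch (hypothetical file "plugins/do_example.py"):
    #
    #     def do(self, arg):
    #         """
    #         example - echo the arguments given to the command
    #         """
    #         print("example plugin called with: %r" % arg)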
def do_plugin(self, arg):
"""
[~prefix] .<name> [arguments] - run a plugin command
[~prefix] plugin <name> [arguments] - run a plugin command
"""
pos = arg.find(' ')
if pos < 0:
name = arg
arg = ''
else:
name = arg[:pos]
arg = arg[pos:].strip()
if not name:
raise CmdError("missing plugin name")
for c in name:
if c not in self.valid_plugin_name_chars:
raise CmdError("invalid plugin name: %r" % name)
name = 'winappdbg.plugins.do_%s' % name
try:
plugin = __import__(name)
components = name.split('.')
for comp in components[1:]:
plugin = getattr(plugin, comp)
reload(plugin)
except ImportError:
raise CmdError("plugin not found: %s" % name)
try:
return plugin.do(self, arg)
except CmdError:
raise
except Exception:
e = sys.exc_info()[1]
## traceback.print_exc(e) # XXX DEBUG
raise CmdError("unhandled exception in plugin: %s" % e)
def do_quit(self, arg):
"""
quit - close the debugging session
q - close the debugging session
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.confirm_quit:
count = self.debug.get_debugee_count()
if count > 0:
if count == 1:
msg = "There's a program still running."
else:
msg = "There are %s programs still running." % count
if not self.ask_user(msg):
return False
self.debuggerExit = True
return True
do_q = do_quit
def do_attach(self, arg):
"""
attach <target> [target...] - attach to the given process(es)
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
targets = self.input_process_list( self.split_tokens(arg, 1) )
if not targets:
print("Error: missing parameters")
else:
debug = self.debug
for pid in targets:
try:
debug.attach(pid)
print("Attached to process (%d)" % pid)
except Exception:
print("Error: can't attach to process (%d)" % pid)
def do_detach(self, arg):
"""
[~process] detach - detach from the current process
detach - detach from the current process
detach <target> [target...] - detach from the given process(es)
"""
debug = self.debug
token_list = self.split_tokens(arg)
if self.cmdprefix:
token_list.insert(0, self.cmdprefix)
targets = self.input_process_list(token_list)
if not targets:
if self.lastEvent is None:
raise CmdError("no current process set")
targets = [ self.lastEvent.get_pid() ]
for pid in targets:
try:
debug.detach(pid)
print("Detached from process (%d)" % pid)
except Exception:
print("Error: can't detach from process (%d)" % pid)
def do_windowed(self, arg):
"""
windowed <target> [arguments...] - run a windowed program for debugging
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
cmdline = self.input_command_line(arg)
try:
            process = self.debug.execl(cmdline,
bConsole = False,
bFollow = self.options.follow)
print("Spawned process (%d)" % process.get_pid())
except Exception:
raise CmdError("can't execute")
self.set_fake_last_event(process)
def do_console(self, arg):
"""
console <target> [arguments...] - run a console program for debugging
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
cmdline = self.input_command_line(arg)
try:
            process = self.debug.execl(cmdline,
bConsole = True,
bFollow = self.options.follow)
print("Spawned process (%d)" % process.get_pid())
except Exception:
raise CmdError("can't execute")
self.set_fake_last_event(process)
def do_continue(self, arg):
"""
continue - continue execution
g - continue execution
go - continue execution
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.debug.get_debugee_count() > 0:
return True
do_g = do_continue
do_go = do_continue
def do_gh(self, arg):
"""
gh - go with exception handled
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.lastEvent:
self.lastEvent.continueStatus = win32.DBG_EXCEPTION_HANDLED
return self.do_go(arg)
def do_gn(self, arg):
"""
gn - go with exception not handled
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.lastEvent:
self.lastEvent.continueStatus = win32.DBG_EXCEPTION_NOT_HANDLED
return self.do_go(arg)
def do_refresh(self, arg):
"""
refresh - refresh the list of running processes and threads
[~process] refresh - refresh the list of running threads
"""
if arg:
raise CmdError("too many arguments")
if self.cmdprefix:
process = self.get_process_from_prefix()
process.scan()
else:
self.debug.system.scan()
def do_processlist(self, arg):
"""
pl - show the processes being debugged
processlist - show the processes being debugged
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
system = self.debug.system
pid_list = self.debug.get_debugee_pids()
if pid_list:
print("Process ID File name")
for pid in pid_list:
if pid == 0:
filename = "System Idle Process"
elif pid == 4:
filename = "System"
else:
filename = system.get_process(pid).get_filename()
filename = PathOperations.pathname_to_filename(filename)
print("%-12d %s" % (pid, filename))
do_pl = do_processlist
def do_threadlist(self, arg):
"""
tl - show the threads being debugged
threadlist - show the threads being debugged
"""
if arg:
raise CmdError("too many arguments")
if self.cmdprefix:
process = self.get_process_from_prefix()
for thread in process.iter_threads():
tid = thread.get_tid()
name = thread.get_name()
print("%-12d %s" % (tid, name))
else:
system = self.debug.system
pid_list = self.debug.get_debugee_pids()
if pid_list:
print("Thread ID Thread name")
for pid in pid_list:
process = system.get_process(pid)
for thread in process.iter_threads():
tid = thread.get_tid()
name = thread.get_name()
print("%-12d %s" % (tid, name))
do_tl = do_threadlist
def do_kill(self, arg):
"""
[~process] kill - kill a process
[~thread] kill - kill a thread
kill - kill the current process
kill * - kill all debugged processes
kill <processes and/or threads...> - kill the given processes and threads
"""
if arg:
if arg == '*':
target_pids = self.debug.get_debugee_pids()
target_tids = list()
else:
target_pids = set()
target_tids = set()
if self.cmdprefix:
pid, tid = self.get_process_and_thread_ids_from_prefix()
                    if tid is not None:
                        target_tids.add(tid)
                    else:
                        target_pids.add(pid)
for token in self.split_tokens(arg):
try:
pid = self.input_process(token)
target_pids.add(pid)
except CmdError:
                        try:
                            tid = self.input_thread(token)
                            target_tids.add(tid)
except CmdError:
msg = "unknown process or thread (%s)" % token
raise CmdError(msg)
target_pids = list(target_pids)
target_tids = list(target_tids)
target_pids.sort()
target_tids.sort()
msg = "You are about to kill %d processes and %d threads."
msg = msg % ( len(target_pids), len(target_tids) )
if self.ask_user(msg):
for pid in target_pids:
self.kill_process(pid)
for tid in target_tids:
self.kill_thread(tid)
else:
if self.cmdprefix:
pid, tid = self.get_process_and_thread_ids_from_prefix()
if tid is None:
if self.lastEvent is not None and pid == self.lastEvent.get_pid():
msg = "You are about to kill the current process."
else:
msg = "You are about to kill process %d." % pid
if self.ask_user(msg):
self.kill_process(pid)
else:
if self.lastEvent is not None and tid == self.lastEvent.get_tid():
msg = "You are about to kill the current thread."
else:
msg = "You are about to kill thread %d." % tid
if self.ask_user(msg):
self.kill_thread(tid)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
pid = self.lastEvent.get_pid()
if self.ask_user("You are about to kill the current process."):
self.kill_process(pid)
# TODO: create hidden threads using undocumented API calls.
def do_modload(self, arg):
"""
[~process] modload <filename.dll> - load a DLL module
"""
filename = self.split_tokens(arg, 1, 1)[0]
process = self.get_process_from_prefix()
try:
process.inject_dll(filename, bWait=False)
except RuntimeError:
print("Can't inject module: %r" % filename)
# TODO: modunload
def do_stack(self, arg):
"""
[~thread] k - show the stack trace
[~thread] stack - show the stack trace
"""
if arg: # XXX TODO add depth parameter
raise CmdError("too many arguments")
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
thread = process.get_thread(tid)
try:
stack_trace = thread.get_stack_trace_with_labels()
if stack_trace:
                print(CrashDump.dump_stack_trace_with_labels(stack_trace))
else:
print("No stack trace available for thread (%d)" % tid)
except WindowsError:
print("Can't get stack trace for thread (%d)" % tid)
do_k = do_stack
def do_break(self, arg):
"""
break - force a debug break in all debugees
break <process> [process...] - force a debug break
"""
debug = self.debug
system = debug.system
targets = self.input_process_list( self.split_tokens(arg) )
if not targets:
targets = debug.get_debugee_pids()
targets.sort()
if self.lastEvent:
current = self.lastEvent.get_pid()
else:
current = None
for pid in targets:
if pid != current and debug.is_debugee(pid):
process = system.get_process(pid)
try:
process.debug_break()
except WindowsError:
print("Can't force a debug break on process (%d)")
def do_step(self, arg):
"""
p - step on the current assembly instruction
next - step on the current assembly instruction
step - step on the current assembly instruction
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if self.lastEvent is None:
raise CmdError("no current process set")
if arg: # XXX this check is to be removed
raise CmdError("too many arguments")
pid = self.lastEvent.get_pid()
thread = self.lastEvent.get_thread()
pc = thread.get_pc()
code = thread.disassemble(pc, 16)[0]
size = code[1]
opcode = code[2].lower()
if ' ' in opcode:
opcode = opcode[ : opcode.find(' ') ]
if opcode in self.jump_instructions or opcode in ('int', 'ret', 'retn'):
return self.do_trace(arg)
address = pc + size
        ## print(hex(pc), hex(address), size)  # XXX DEBUG
self.debug.stalk_at(pid, address)
return True
do_p = do_step
do_next = do_step
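    # Step-over sketch: for an instruction like "call 0x401000" at pc the
    # debugger stalks pc + size (the return point), but for jumps, "int"
    # and "ret" the next instruction may never be reached, so it falls
    # back to single-stepping (do_trace) instead.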
def do_trace(self, arg):
"""
t - trace at the current assembly instruction
trace - trace at the current assembly instruction
"""
if arg: # XXX this check is to be removed
raise CmdError("too many arguments")
if self.lastEvent is None:
raise CmdError("no current thread set")
self.lastEvent.get_thread().set_tf()
return True
do_t = do_trace
def do_bp(self, arg):
"""
[~process] bp <address> - set a code breakpoint
"""
pid = self.get_process_id_from_prefix()
if not self.debug.is_debugee(pid):
raise CmdError("target process is not being debugged")
process = self.get_process(pid)
token_list = self.split_tokens(arg, 1, 1)
try:
address = self.input_address(token_list[0], pid)
deferred = False
except Exception:
address = token_list[0]
deferred = True
if not address:
address = token_list[0]
deferred = True
self.debug.break_at(pid, address)
if deferred:
print("Deferred breakpoint set at %s" % address)
else:
print("Breakpoint set at %s" % address)
def do_ba(self, arg):
"""
[~thread] ba <a|w|e> <1|2|4|8> <address> - set hardware breakpoint
"""
debug = self.debug
thread = self.get_thread_from_prefix()
pid = thread.get_pid()
tid = thread.get_tid()
if not debug.is_debugee(pid):
raise CmdError("target thread is not being debugged")
token_list = self.split_tokens(arg, 3, 3)
access = token_list[0].lower()
size = token_list[1]
address = token_list[2]
if access == 'a':
access = debug.BP_BREAK_ON_ACCESS
elif access == 'w':
access = debug.BP_BREAK_ON_WRITE
elif access == 'e':
access = debug.BP_BREAK_ON_EXECUTION
else:
raise CmdError("bad access type: %s" % token_list[0])
if size == '1':
size = debug.BP_WATCH_BYTE
elif size == '2':
size = debug.BP_WATCH_WORD
elif size == '4':
size = debug.BP_WATCH_DWORD
elif size == '8':
size = debug.BP_WATCH_QWORD
else:
raise CmdError("bad breakpoint size: %s" % size)
thread = self.get_thread_from_prefix()
tid = thread.get_tid()
pid = thread.get_pid()
if not debug.is_debugee(pid):
raise CmdError("target process is not being debugged")
address = self.input_address(address, pid)
if debug.has_hardware_breakpoint(tid, address):
debug.erase_hardware_breakpoint(tid, address)
debug.define_hardware_breakpoint(tid, address, access, size)
debug.enable_hardware_breakpoint(tid, address)
def do_bm(self, arg):
"""
[~process] bm <address-address> - set memory breakpoint
"""
pid = self.get_process_id_from_prefix()
if not self.debug.is_debugee(pid):
raise CmdError("target process is not being debugged")
process = self.get_process(pid)
token_list = self.split_tokens(arg, 1, 2)
address, size = self.input_address_range(token_list[0], pid)
self.debug.watch_buffer(pid, address, size)
def do_bl(self, arg):
"""
bl - list the breakpoints for the current process
bl * - list the breakpoints for all processes
[~process] bl - list the breakpoints for the given process
bl <process> [process...] - list the breakpoints for each given process
"""
debug = self.debug
if arg == '*':
if self.cmdprefix:
raise CmdError("prefix not supported")
            targets = debug.get_debugee_pids()
else:
targets = self.input_process_list( self.split_tokens(arg) )
if self.cmdprefix:
targets.insert(0, self.input_process(self.cmdprefix))
if not targets:
if self.lastEvent is None:
raise CmdError("no current process is set")
targets = [ self.lastEvent.get_pid() ]
for pid in targets:
bplist = debug.get_process_code_breakpoints(pid)
printed_process_banner = False
if bplist:
if not printed_process_banner:
print("Process %d:" % pid)
printed_process_banner = True
for bp in bplist:
address = repr(bp)[1:-1].replace('remote address ','')
print(" %s" % address)
dbplist = debug.get_process_deferred_code_breakpoints(pid)
if dbplist:
if not printed_process_banner:
print("Process %d:" % pid)
printed_process_banner = True
for (label, action, oneshot) in dbplist:
if oneshot:
address = " Deferred unconditional one-shot" \
" code breakpoint at %s"
else:
address = " Deferred unconditional" \
" code breakpoint at %s"
address = address % label
print(" %s" % address)
bplist = debug.get_process_page_breakpoints(pid)
if bplist:
if not printed_process_banner:
print("Process %d:" % pid)
printed_process_banner = True
for bp in bplist:
address = repr(bp)[1:-1].replace('remote address ','')
print(" %s" % address)
for tid in debug.system.get_process(pid).iter_thread_ids():
bplist = debug.get_thread_hardware_breakpoints(tid)
if bplist:
print("Thread %d:" % tid)
for bp in bplist:
address = repr(bp)[1:-1].replace('remote address ','')
print(" %s" % address)
def do_bo(self, arg):
"""
[~process] bo <address> - make a code breakpoint one-shot
[~thread] bo <address> - make a hardware breakpoint one-shot
[~process] bo <address-address> - make a memory breakpoint one-shot
[~process] bo <address> <size> - make a memory breakpoint one-shot
"""
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_breakpoint(token_list)
debug = self.debug
found = False
if size is None:
if tid is not None:
if debug.has_hardware_breakpoint(tid, address):
debug.enable_one_shot_hardware_breakpoint(tid, address)
found = True
if pid is not None:
if debug.has_code_breakpoint(pid, address):
debug.enable_one_shot_code_breakpoint(pid, address)
found = True
else:
if debug.has_page_breakpoint(pid, address):
debug.enable_one_shot_page_breakpoint(pid, address)
found = True
if not found:
print("Error: breakpoint not found.")
def do_be(self, arg):
"""
[~process] be <address> - enable a code breakpoint
[~thread] be <address> - enable a hardware breakpoint
[~process] be <address-address> - enable a memory breakpoint
[~process] be <address> <size> - enable a memory breakpoint
"""
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_breakpoint(token_list)
debug = self.debug
found = False
if size is None:
if tid is not None:
if debug.has_hardware_breakpoint(tid, address):
debug.enable_hardware_breakpoint(tid, address)
found = True
if pid is not None:
if debug.has_code_breakpoint(pid, address):
debug.enable_code_breakpoint(pid, address)
found = True
else:
if debug.has_page_breakpoint(pid, address):
debug.enable_page_breakpoint(pid, address)
found = True
if not found:
print("Error: breakpoint not found.")
def do_bd(self, arg):
"""
[~process] bd <address> - disable a code breakpoint
[~thread] bd <address> - disable a hardware breakpoint
[~process] bd <address-address> - disable a memory breakpoint
[~process] bd <address> <size> - disable a memory breakpoint
"""
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_breakpoint(token_list)
debug = self.debug
found = False
if size is None:
if tid is not None:
if debug.has_hardware_breakpoint(tid, address):
debug.disable_hardware_breakpoint(tid, address)
found = True
if pid is not None:
if debug.has_code_breakpoint(pid, address):
debug.disable_code_breakpoint(pid, address)
found = True
else:
if debug.has_page_breakpoint(pid, address):
debug.disable_page_breakpoint(pid, address)
found = True
if not found:
print("Error: breakpoint not found.")
def do_bc(self, arg):
"""
[~process] bc <address> - clear a code breakpoint
[~thread] bc <address> - clear a hardware breakpoint
[~process] bc <address-address> - clear a memory breakpoint
[~process] bc <address> <size> - clear a memory breakpoint
"""
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_breakpoint(token_list)
debug = self.debug
found = False
if size is None:
if tid is not None:
if debug.has_hardware_breakpoint(tid, address):
debug.dont_watch_variable(tid, address)
found = True
if pid is not None:
if debug.has_code_breakpoint(pid, address):
debug.dont_break_at(pid, address)
found = True
else:
if debug.has_page_breakpoint(pid, address):
debug.dont_watch_buffer(pid, address, size)
found = True
if not found:
print("Error: breakpoint not found.")
def do_disassemble(self, arg):
"""
[~thread] u [register] - show code disassembly
[~process] u [address] - show code disassembly
[~thread] disassemble [register] - show code disassembly
[~process] disassemble [address] - show code disassembly
"""
if not arg:
arg = self.default_disasm_target
token_list = self.split_tokens(arg, 1, 1)
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
address = self.input_address(token_list[0], pid, tid)
try:
code = process.disassemble(address, 15*8)[:8]
except Exception:
msg = "can't disassemble address %s"
msg = msg % HexDump.address(address)
raise CmdError(msg)
if code:
label = process.get_label_at_address(address)
last_code = code[-1]
next_address = last_code[0] + last_code[1]
next_address = HexOutput.integer(next_address)
self.default_disasm_target = next_address
print("%s:" % label)
## print(CrashDump.dump_code(code))
for line in code:
print(CrashDump.dump_code_line(line, bShowDump = False))
do_u = do_disassemble
def do_search(self, arg):
"""
[~process] s [address-address] <search string>
[~process] search [address-address] <search string>
"""
token_list = self.split_tokens(arg, 1, 3)
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
if len(token_list) == 1:
pattern = token_list[0]
minAddr = None
maxAddr = None
else:
pattern = token_list[-1]
addr, size = self.input_address_range(token_list[:-1], pid, tid)
minAddr = addr
maxAddr = addr + size
        iterator = process.search_bytes(pattern, minAddr, maxAddr)
        if process.get_bits() == 32:
            addr_width = 8
        else:
            addr_width = 16
        # TODO: need a prettier output here!
        for addr in iterator:
            print(HexDump.address(addr, addr_width))
do_s = do_search
def do_searchhex(self, arg):
"""
[~process] sh [address-address] <hexadecimal pattern>
[~process] searchhex [address-address] <hexadecimal pattern>
"""
token_list = self.split_tokens(arg, 1, 3)
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
if len(token_list) == 1:
pattern = token_list[0]
minAddr = None
maxAddr = None
else:
pattern = token_list[-1]
addr, size = self.input_address_range(token_list[:-1], pid, tid)
minAddr = addr
maxAddr = addr + size
        iterator = process.search_hexa(pattern, minAddr, maxAddr)
        if process.get_bits() == 32:
            addr_width = 8
        else:
            addr_width = 16
        for addr, data in iterator:
            print(HexDump.hexblock(data, addr, addr_width))
do_sh = do_searchhex
## def do_strings(self, arg):
## """
## [~process] strings - extract ASCII strings from memory
## """
## if arg:
## raise CmdError("too many arguments")
## pid, tid = self.get_process_and_thread_ids_from_prefix()
## process = self.get_process(pid)
## for addr, size, data in process.strings():
    ##             print("%s: %r" % (HexDump.address(addr), data))
def do_d(self, arg):
"""
[~thread] d <register> - show memory contents
[~thread] d <register-register> - show memory contents
[~thread] d <register> <size> - show memory contents
[~process] d <address> - show memory contents
[~process] d <address-address> - show memory contents
[~process] d <address> <size> - show memory contents
"""
return self.last_display_command(arg)
def do_db(self, arg):
"""
[~thread] db <register> - show memory contents as bytes
[~thread] db <register-register> - show memory contents as bytes
[~thread] db <register> <size> - show memory contents as bytes
[~process] db <address> - show memory contents as bytes
[~process] db <address-address> - show memory contents as bytes
[~process] db <address> <size> - show memory contents as bytes
"""
self.print_memory_display(arg, HexDump.hexblock)
self.last_display_command = self.do_db
def do_dw(self, arg):
"""
[~thread] dw <register> - show memory contents as words
[~thread] dw <register-register> - show memory contents as words
[~thread] dw <register> <size> - show memory contents as words
[~process] dw <address> - show memory contents as words
[~process] dw <address-address> - show memory contents as words
[~process] dw <address> <size> - show memory contents as words
"""
self.print_memory_display(arg, HexDump.hexblock_word)
self.last_display_command = self.do_dw
def do_dd(self, arg):
"""
[~thread] dd <register> - show memory contents as dwords
[~thread] dd <register-register> - show memory contents as dwords
[~thread] dd <register> <size> - show memory contents as dwords
[~process] dd <address> - show memory contents as dwords
[~process] dd <address-address> - show memory contents as dwords
[~process] dd <address> <size> - show memory contents as dwords
"""
self.print_memory_display(arg, HexDump.hexblock_dword)
self.last_display_command = self.do_dd
def do_dq(self, arg):
"""
[~thread] dq <register> - show memory contents as qwords
[~thread] dq <register-register> - show memory contents as qwords
[~thread] dq <register> <size> - show memory contents as qwords
[~process] dq <address> - show memory contents as qwords
[~process] dq <address-address> - show memory contents as qwords
[~process] dq <address> <size> - show memory contents as qwords
"""
self.print_memory_display(arg, HexDump.hexblock_qword)
self.last_display_command = self.do_dq
# XXX TODO
# Change the way the default is used with ds and du
def do_ds(self, arg):
"""
[~thread] ds <register> - show memory contents as ANSI string
[~process] ds <address> - show memory contents as ANSI string
"""
if not arg:
arg = self.default_display_target
token_list = self.split_tokens(arg, 1, 1)
pid, tid, address, size = self.input_display(token_list, 256)
process = self.get_process(pid)
data = process.peek_string(address, False, size)
if data:
print(repr(data))
self.last_display_command = self.do_ds
def do_du(self, arg):
"""
[~thread] du <register> - show memory contents as Unicode string
[~process] du <address> - show memory contents as Unicode string
"""
if not arg:
arg = self.default_display_target
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_display(token_list, 256)
process = self.get_process(pid)
data = process.peek_string(address, True, size)
if data:
print(repr(data))
self.last_display_command = self.do_du
def do_register(self, arg):
"""
        [~thread] r - print the value of all registers
        [~thread] r <register> - print the value of a register
        [~thread] r <register>=<value> - change the value of a register
        [~thread] register - print the value of all registers
        [~thread] register <register> - print the value of a register
        [~thread] register <register>=<value> - change the value of a register
"""
arg = arg.strip()
if not arg:
self.print_current_location()
else:
equ = arg.find('=')
if equ >= 0:
register = arg[:equ].strip()
value = arg[equ+1:].strip()
if not value:
value = '0'
self.change_register(register, value)
else:
value = self.input_register(arg)
if value is None:
raise CmdError("unknown register: %s" % arg)
try:
label = None
thread = self.get_thread_from_prefix()
process = thread.get_process()
module = process.get_module_at_address(value)
if module:
label = module.get_label_at_address(value)
except RuntimeError:
label = None
reg = arg.upper()
val = HexDump.address(value)
if label:
print("%s: %s (%s)" % (reg, val, label))
else:
print("%s: %s" % (reg, val))
do_r = do_register
def do_eb(self, arg):
"""
[~process] eb <address> <data> - write the data to the specified address
"""
# TODO
# data parameter should be optional, use a child Cmd here
pid = self.get_process_id_from_prefix()
token_list = self.split_tokens(arg, 2)
address = self.input_address(token_list[0], pid)
data = HexInput.hexadecimal(' '.join(token_list[1:]))
self.write_memory(address, data, pid)
# XXX TODO
# add ew, ed and eq here
def do_find(self, arg):
"""
[~process] f <string> - find the string in the process memory
[~process] find <string> - find the string in the process memory
"""
if not arg:
raise CmdError("missing parameter: string")
process = self.get_process_from_prefix()
self.find_in_memory(arg, process)
do_f = do_find
def do_memory(self, arg):
"""
[~process] m - show the process memory map
[~process] memory - show the process memory map
"""
if arg: # TODO: take min and max addresses
raise CmdError("too many arguments")
process = self.get_process_from_prefix()
try:
memoryMap = process.get_memory_map()
mappedFilenames = process.get_mapped_filenames()
print('')
print(CrashDump.dump_memory_map(memoryMap, mappedFilenames))
except WindowsError:
msg = "can't get memory information for process (%d)"
raise CmdError(msg % process.get_pid())
do_m = do_memory
#------------------------------------------------------------------------------
# Event handling
# TODO
# * add configurable stop/don't stop behavior on events and exceptions
# Stop for all events, unless stated otherwise.
def event(self, event):
self.print_event(event)
self.prompt_user()
# Stop for all exceptions, unless stated otherwise.
def exception(self, event):
self.print_exception(event)
self.prompt_user()
# Stop for breakpoint exceptions.
def breakpoint(self, event):
if hasattr(event, 'breakpoint') and event.breakpoint:
self.print_breakpoint_location(event)
else:
self.print_exception(event)
self.prompt_user()
# Stop for WOW64 breakpoint exceptions.
def wow64_breakpoint(self, event):
self.print_exception(event)
self.prompt_user()
# Stop for single step exceptions.
def single_step(self, event):
if event.debug.is_tracing(event.get_tid()):
self.print_breakpoint_location(event)
else:
self.print_exception(event)
self.prompt_user()
# Don't stop for C++ exceptions.
def ms_vc_exception(self, event):
self.print_exception(event)
event.continueStatus = win32.DBG_CONTINUE
# Don't stop for process start.
def create_process(self, event):
self.print_process_start(event)
self.print_thread_start(event)
self.print_module_load(event)
# Don't stop for process exit.
def exit_process(self, event):
self.print_process_end(event)
# Don't stop for thread creation.
def create_thread(self, event):
self.print_thread_start(event)
# Don't stop for thread exit.
def exit_thread(self, event):
self.print_thread_end(event)
# Don't stop for DLL load.
def load_dll(self, event):
self.print_module_load(event)
# Don't stop for DLL unload.
def unload_dll(self, event):
self.print_module_unload(event)
# Don't stop for debug strings.
def output_string(self, event):
self.print_debug_string(event)
#------------------------------------------------------------------------------
# History file
def load_history(self):
global readline
if readline is None:
try:
import readline
except ImportError:
return
if self.history_file_full_path is None:
folder = os.environ.get('USERPROFILE', '')
if not folder:
folder = os.environ.get('HOME', '')
if not folder:
folder = os.path.split(sys.argv[0])[1]
if not folder:
folder = os.path.curdir
self.history_file_full_path = os.path.join(folder,
self.history_file)
try:
if os.path.exists(self.history_file_full_path):
readline.read_history_file(self.history_file_full_path)
except IOError:
e = sys.exc_info()[1]
warnings.warn("Cannot load history file, reason: %s" % str(e))
def save_history(self):
if self.history_file_full_path is not None:
global readline
if readline is None:
try:
import readline
except ImportError:
return
try:
readline.write_history_file(self.history_file_full_path)
except IOError:
e = sys.exc_info()[1]
warnings.warn("Cannot save history file, reason: %s" % str(e))
#------------------------------------------------------------------------------
# Main loop
# Debugging loop.
def loop(self):
self.debuggerExit = False
debug = self.debug
# Stop on the initial event, if any.
if self.lastEvent is not None:
self.cmdqueue.append('r')
self.prompt_user()
# Loop until the debugger is told to quit.
while not self.debuggerExit:
try:
# If for some reason the last event wasn't continued,
# continue it here. This won't be done more than once
# for a given Event instance, though.
try:
debug.cont()
# On error, show the command prompt.
except Exception:
traceback.print_exc()
self.prompt_user()
# While debugees are attached, handle debug events.
# Some debug events may cause the command prompt to be shown.
if self.debug.get_debugee_count() > 0:
try:
# Get the next debug event.
debug.wait()
# Dispatch the debug event.
try:
debug.dispatch()
# Continue the debug event.
finally:
debug.cont()
# On error, show the command prompt.
except Exception:
traceback.print_exc()
self.prompt_user()
# While no debugees are attached, show the command prompt.
else:
self.prompt_user()
# When the user presses Ctrl-C send a debug break to all debugees.
except KeyboardInterrupt:
success = False
try:
print("*** User requested debug break")
system = debug.system
for pid in debug.get_debugee_pids():
try:
system.get_process(pid).debug_break()
success = True
except:
traceback.print_exc()
except:
traceback.print_exc()
if not success:
raise # This should never happen!
| apache-2.0 |
rgerkin/pyNeuroML | pyneuroml/tune/NeuroMLSimulation.py | 1 | 5357 | '''
A class for running a single instance of a NeuroML model by generating a
LEMS file and using pyNeuroML to run in a chosen simulator
'''
import sys
import time
from pyneuroml import pynml
from pyneuroml.lems import generate_lems_file_for_neuroml
try:
import pyelectro # Not used here, just for checking installation
except ImportError:
print('>> Note: pyelectro from https://github.com/pgleeson/pyelectro is required!')
exit()
try:
import neurotune # Not used here, just for checking installation
except ImportError:
print('>> Note: neurotune from https://github.com/pgleeson/neurotune is required!')
exit()
class NeuroMLSimulation(object):
def __init__(self,
reference,
neuroml_file,
target,
sim_time=1000,
dt=0.05,
simulator='jNeuroML',
generate_dir = './',
cleanup = True,
nml_doc = None):
self.sim_time = sim_time
self.dt = dt
self.simulator = simulator
self.generate_dir = generate_dir if generate_dir.endswith('/') else generate_dir+'/'
self.reference = reference
self.target = target
self.neuroml_file = neuroml_file
self.nml_doc = nml_doc
self.cleanup = cleanup
self.already_run = False
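    # Typical usage sketch (file names are hypothetical):
    #
    #     sim = NeuroMLSimulation('TestHH', 'HHCellNetwork.net.nml',
    #                             'HHCellNetwork', sim_time=700, dt=0.05)
    #     sim.go()
    #     sim.show()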
def show(self):
"""
        Plot the result of the simulation once it's been initialized
"""
from matplotlib import pyplot as plt
if self.already_run:
for ref in self.volts.keys():
plt.plot(self.t, self.volts[ref], label=ref)
plt.title("Simulation voltage vs time")
plt.legend()
plt.xlabel("Time [ms]")
plt.ylabel("Voltage [mV]")
else:
pynml.print_comment("First you have to 'go()' the simulation.", True)
plt.show()
def go(self):
lems_file_name = 'LEMS_%s.xml'%(self.reference)
generate_lems_file_for_neuroml(self.reference,
self.neuroml_file,
self.target,
self.sim_time,
self.dt,
lems_file_name = lems_file_name,
target_dir = self.generate_dir,
nml_doc = self.nml_doc)
pynml.print_comment_v("Running a simulation of %s ms with timestep %s ms: %s"%(self.sim_time, self.dt, lems_file_name))
self.already_run = True
start = time.time()
if self.simulator == 'jNeuroML':
results = pynml.run_lems_with_jneuroml(lems_file_name,
nogui=True,
load_saved_data=True,
plot=False,
exec_in_dir = self.generate_dir,
verbose=False,
cleanup=self.cleanup)
elif self.simulator == 'jNeuroML_NEURON':
results = pynml.run_lems_with_jneuroml_neuron(lems_file_name,
nogui=True,
load_saved_data=True,
plot=False,
exec_in_dir = self.generate_dir,
verbose=False,
cleanup=self.cleanup)
else:
pynml.print_comment_v('Unsupported simulator: %s'%self.simulator)
exit()
secs = time.time()-start
pynml.print_comment_v("Ran simulation in %s in %f seconds (%f mins)\n\n"%(self.simulator, secs, secs/60.0))
self.t = [t*1000 for t in results['t']]
self.volts = {}
for key in results.keys():
if key != 't':
self.volts[key] = [v*1000 for v in results[key]]
if __name__ == '__main__':
sim_time = 700
dt = 0.05
if len(sys.argv) == 2 and sys.argv[1] == '-net':
sim = NeuroMLSimulation('TestNet',
'../../examples/test_data/simplenet.nml',
'simplenet',
sim_time,
dt,
'jNeuroML',
'temp/')
sim.go()
sim.show()
else:
sim = NeuroMLSimulation('TestHH',
'../../examples/test_data/HHCellNetwork.net.nml',
'HHCellNetwork',
sim_time,
dt,
'jNeuroML',
'temp')
sim.go()
sim.show()
| lgpl-3.0 |
openhatch/new-mini-tasks | vendor/packages/Django/django/contrib/gis/geos/point.py | 224 | 4351 | from ctypes import c_uint
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos import prototypes as capi
from django.utils import six
from django.utils.six.moves import xrange
class Point(GEOSGeometry):
_minlength = 2
_maxlength = 3
def __init__(self, x, y=None, z=None, srid=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
>>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, six.integer_types + (float,)) and isinstance(y, six.integer_types + (float,)):
# Here X, Y, and (optionally) Z were passed in individually, as parameters.
if isinstance(z, six.integer_types + (float,)):
ndim = 3
coords = [x, y, z]
else:
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
point = self._create_point(ndim, coords)
# Initializing using the address returned from the GEOS
# createPoint factory.
super(Point, self).__init__(point, srid=srid)
def _create_point(self, ndim, coords):
"""
Create a coordinate sequence, set X, Y, [Z], and create point
"""
if ndim < 2 or ndim > 3:
raise TypeError('Invalid point dimension: %s' % str(ndim))
cs = capi.create_cs(c_uint(1), c_uint(ndim))
i = iter(coords)
capi.cs_setx(cs, 0, next(i))
capi.cs_sety(cs, 0, next(i))
if ndim == 3: capi.cs_setz(cs, 0, next(i))
return capi.create_point(cs)
def _set_list(self, length, items):
ptr = self._create_point(length, items)
if ptr:
capi.destroy_geom(self.ptr)
self._ptr = ptr
self._set_cs()
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._cs.setOrdinate(index, 0, value)
def __iter__(self):
"Allows iteration over coordinates of this Point."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of dimensions for this Point (either 0, 2 or 3)."
if self.empty: return 0
if self.hasz: return 3
else: return 2
def _get_single_external(self, index):
if index == 0:
return self.x
elif index == 1:
return self.y
elif index == 2:
return self.z
_get_single_internal = _get_single_external
def get_x(self):
"Returns the X component of the Point."
return self._cs.getOrdinate(0, 0)
def set_x(self, value):
"Sets the X component of the Point."
self._cs.setOrdinate(0, 0, value)
def get_y(self):
"Returns the Y component of the Point."
return self._cs.getOrdinate(1, 0)
def set_y(self, value):
"Sets the Y component of the Point."
self._cs.setOrdinate(1, 0, value)
def get_z(self):
"Returns the Z component of the Point."
if self.hasz:
return self._cs.getOrdinate(2, 0)
else:
return None
def set_z(self, value):
"Sets the Z component of the Point."
if self.hasz:
self._cs.setOrdinate(2, 0, value)
else:
raise GEOSException('Cannot set Z on 2D Point.')
# X, Y, Z properties
x = property(get_x, set_x)
y = property(get_y, set_y)
z = property(get_z, set_z)
### Tuple setting and retrieval routines. ###
def get_coords(self):
"Returns a tuple of the point."
return self._cs.tuple
def set_coords(self, tup):
"Sets the coordinates of the point with the given tuple."
self._cs[0] = tup
# The tuple and coords properties
tuple = property(get_coords, set_coords)
coords = tuple
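    # Example of the tuple/coords properties (hypothetical session):
    #
    #     >>> p = Point(1, 2)
    #     >>> p.coords
    #     (1.0, 2.0)
    #     >>> p.coords = (5, 23)
    #     >>> p.x
    #     5.0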
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/IPython/terminal/pt_inputhooks/wx.py | 8 | 5441 | """Enable wxPython to be used interactively in prompt_toolkit
"""
from __future__ import absolute_import
import sys
import signal
import time
from timeit import default_timer as clock
import wx
def inputhook_wx1(context):
"""Run the wx event loop by processing pending events only.
This approach seems to work, but its performance is not great as it
relies on having PyOS_InputHook called regularly.
"""
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
# Make a temporary event loop and process system events until
# there are no more waiting, then allow idle events (which
# will also deal with pending or posted wx events.)
evtloop = wx.EventLoop()
ea = wx.EventLoopActivator(evtloop)
while evtloop.Pending():
evtloop.Dispatch()
app.ProcessIdle()
del ea
except KeyboardInterrupt:
pass
return 0
class EventLoopTimer(wx.Timer):
def __init__(self, func):
self.func = func
wx.Timer.__init__(self)
def Notify(self):
self.func()
class EventLoopRunner(object):
def Run(self, time, input_is_ready):
self.input_is_ready = input_is_ready
self.evtloop = wx.EventLoop()
self.timer = EventLoopTimer(self.check_stdin)
self.timer.Start(time)
self.evtloop.Run()
def check_stdin(self):
if self.input_is_ready():
self.timer.Stop()
self.evtloop.Exit()
def inputhook_wx2(context):
"""Run the wx event loop, polling for stdin.
This version runs the wx eventloop for an undetermined amount of time,
during which it periodically checks to see if anything is ready on
stdin. If anything is ready on stdin, the event loop exits.
The argument to elr.Run controls how often the event loop looks at stdin.
This determines the responsiveness at the keyboard. A setting of 1000
enables a user to type at most 1 char per second. I have found that a
setting of 10 gives good keyboard response. We can shorten it further,
but eventually performance would suffer from calling select/kbhit too
often.
"""
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
elr = EventLoopRunner()
# As this time is made shorter, keyboard response improves, but idle
# CPU load goes up. 10 ms seems like a good compromise.
elr.Run(time=10, # CHANGE time here to control polling interval
input_is_ready=context.input_is_ready)
except KeyboardInterrupt:
pass
return 0
def inputhook_wx3(context):
"""Run the wx event loop by processing pending events only.
This is like inputhook_wx1, but it keeps processing pending events
until stdin is ready. After processing all pending events, a call to
    time.sleep is inserted. This is needed; otherwise, CPU usage is at 100%.
This sleep time should be tuned though for best performance.
"""
# We need to protect against a user pressing Control-C when IPython is
# idle and this is running. We trap KeyboardInterrupt and pass.
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
# The import of wx on Linux sets the handler for signal.SIGINT
# to 0. This is a bug in wx or gtk. We fix by just setting it
# back to the Python default.
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
evtloop = wx.EventLoop()
ea = wx.EventLoopActivator(evtloop)
t = clock()
while not context.input_is_ready():
while evtloop.Pending():
t = clock()
evtloop.Dispatch()
app.ProcessIdle()
# We need to sleep at this point to keep the idle CPU load
            # low. However, if we sleep too long, GUI response is poor. As
# a compromise, we watch how often GUI events are being processed
# and switch between a short and long sleep time. Here are some
# stats useful in helping to tune this.
# time CPU load
# 0.001 13%
# 0.005 3%
# 0.01 1.5%
# 0.05 0.5%
used_time = clock() - t
if used_time > 10.0:
# print 'Sleep for 1 s' # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print 'Sleep for 0.05 s' # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
del ea
except KeyboardInterrupt:
pass
return 0
if sys.platform == 'darwin':
# On OSX, evtloop.Pending() always returns True, regardless of there being
# any events pending. As such we can't use implementations 1 or 3 of the
# inputhook as those depend on a pending/dispatch loop.
inputhook = inputhook_wx2
else:
# This is our default implementation
inputhook = inputhook_wx3
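# Usage sketch (hypothetical): prompt_toolkit calls inputhook(context) while
# idle, and context.input_is_ready() reports whether stdin has data. A manual
# smoke test could look like:
#
#     class _FakeContext(object):
#         def input_is_ready(self):
#             return True   # pretend input arrived immediately
#
#     inputhook(_FakeContext())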
| gpl-3.0 |
bearstech/ansible | test/units/module_utils/test_network_common.py | 31 | 5437 | # -*- coding: utf-8 -*-
#
# (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.module_utils.network_common import to_list, sort_list
from ansible.module_utils.network_common import dict_diff, dict_merge
from ansible.module_utils.network_common import conditional, Template
class TestModuleUtilsNetworkCommon(unittest.TestCase):
def test_to_list(self):
for scalar in ('string', 1, True, False, None):
self.assertTrue(isinstance(to_list(scalar), list))
for container in ([1, 2, 3], {'one': 1}):
self.assertTrue(isinstance(to_list(container), list))
test_list = [1, 2, 3]
self.assertNotEqual(id(test_list), id(to_list(test_list)))
def test_sort(self):
data = [3, 1, 2]
self.assertEqual([1, 2, 3], sort_list(data))
string_data = '123'
self.assertEqual(string_data, sort_list(string_data))
def test_dict_diff(self):
base = dict(obj2=dict(), b1=True, b2=False, b3=False,
one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
l1=[1, 3], l2=[1, 2, 3], l4=[4],
nested=dict(n1=dict(n2=2)))
other = dict(b1=True, b2=False, b3=True, b4=True,
one=1, three=4, four=4, obj1=dict(key1=2),
l1=[2, 1], l2=[3, 2, 1], l3=[1],
nested=dict(n1=dict(n2=2, n3=3)))
result = dict_diff(base, other)
# string assertions
self.assertNotIn('one', result)
self.assertNotIn('two', result)
self.assertEqual(result['three'], 4)
self.assertEqual(result['four'], 4)
# dict assertions
self.assertIn('obj1', result)
self.assertIn('key1', result['obj1'])
self.assertNotIn('key2', result['obj1'])
# list assertions
self.assertEqual(result['l1'], [2, 1])
self.assertNotIn('l2', result)
self.assertEqual(result['l3'], [1])
self.assertNotIn('l4', result)
# nested assertions
self.assertIn('obj1', result)
self.assertEqual(result['obj1']['key1'], 2)
self.assertNotIn('key2', result['obj1'])
# bool assertions
self.assertNotIn('b1', result)
self.assertNotIn('b2', result)
self.assertTrue(result['b3'])
self.assertTrue(result['b4'])
def test_dict_merge(self):
base = dict(obj2=dict(), b1=True, b2=False, b3=False,
one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
l1=[1, 3], l2=[1, 2, 3], l4=[4],
nested=dict(n1=dict(n2=2)))
other = dict(b1=True, b2=False, b3=True, b4=True,
one=1, three=4, four=4, obj1=dict(key1=2),
l1=[2, 1], l2=[3, 2, 1], l3=[1],
nested=dict(n1=dict(n2=2, n3=3)))
result = dict_merge(base, other)
# string assertions
self.assertIn('one', result)
self.assertIn('two', result)
self.assertEqual(result['three'], 4)
self.assertEqual(result['four'], 4)
# dict assertions
self.assertIn('obj1', result)
self.assertIn('key1', result['obj1'])
self.assertIn('key2', result['obj1'])
# list assertions
self.assertEqual(result['l1'], [1, 2, 3])
self.assertIn('l2', result)
self.assertEqual(result['l3'], [1])
self.assertIn('l4', result)
# nested assertions
self.assertIn('obj1', result)
self.assertEqual(result['obj1']['key1'], 2)
self.assertIn('key2', result['obj1'])
# bool assertions
self.assertIn('b1', result)
self.assertIn('b2', result)
self.assertTrue(result['b3'])
self.assertTrue(result['b4'])
def test_conditional(self):
self.assertTrue(conditional(10, 10))
self.assertTrue(conditional('10', '10'))
self.assertTrue(conditional('foo', 'foo'))
self.assertTrue(conditional(True, True))
self.assertTrue(conditional(False, False))
self.assertTrue(conditional(None, None))
self.assertTrue(conditional("ge(1)", 1))
self.assertTrue(conditional("gt(1)", 2))
self.assertTrue(conditional("le(2)", 2))
self.assertTrue(conditional("lt(3)", 2))
self.assertTrue(conditional("eq(1)", 1))
self.assertTrue(conditional("neq(0)", 1))
self.assertTrue(conditional("min(1)", 1))
self.assertTrue(conditional("max(1)", 1))
self.assertTrue(conditional("exactly(1)", 1))
def test_template(self):
tmpl = Template()
self.assertEqual('foo', tmpl('{{ test }}', {'test': 'foo'}))
| gpl-3.0 |
heke123/chromium-crosswalk | build/android/buildbot/bb_device_status_check.py | 7 | 14834 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A class to keep track of devices across builds and report state."""
import argparse
import json
import logging
import os
import psutil
import re
import signal
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import devil_chromium
from devil import devil_env
from devil.android import battery_utils
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_list
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.constants import exit_codes
from devil.utils import lsusb
from devil.utils import reset_usb
from devil.utils import run_tests_helper
from pylib.constants import host_paths
_RE_DEVICE_ID = re.compile(r'Device ID = (\d+)')
def KillAllAdb():
def GetAllAdb():
for p in psutil.process_iter():
try:
if 'adb' in p.name:
yield p
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
for sig in [signal.SIGTERM, signal.SIGQUIT, signal.SIGKILL]:
for p in GetAllAdb():
try:
logging.info('kill %d %d (%s [%s])', sig, p.pid, p.name,
' '.join(p.cmdline))
p.send_signal(sig)
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
for p in GetAllAdb():
try:
logging.error('Unable to kill %d (%s [%s])', p.pid, p.name,
' '.join(p.cmdline))
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
def _IsBlacklisted(serial, blacklist):
return blacklist and serial in blacklist.Read()
def _BatteryStatus(device, blacklist):
battery_info = {}
try:
battery = battery_utils.BatteryUtils(device)
battery_info = battery.GetBatteryInfo(timeout=5)
battery_level = int(battery_info.get('level', 100))
if battery_level < 15:
logging.error('Critically low battery level (%d)', battery_level)
battery = battery_utils.BatteryUtils(device)
if not battery.GetCharging():
battery.SetCharging(True)
if blacklist:
blacklist.Extend([device.adb.GetDeviceSerial()], reason='low_battery')
except device_errors.CommandFailedError:
logging.exception('Failed to get battery information for %s',
str(device))
return battery_info
def _IMEISlice(device):
imei_slice = ''
try:
for l in device.RunShellCommand(['dumpsys', 'iphonesubinfo'],
check_return=True, timeout=5):
m = _RE_DEVICE_ID.match(l)
if m:
imei_slice = m.group(1)[-6:]
except device_errors.CommandFailedError:
logging.exception('Failed to get IMEI slice for %s', str(device))
return imei_slice
def DeviceStatus(devices, blacklist):
"""Generates status information for the given devices.
Args:
devices: The devices to generate status for.
blacklist: The current device blacklist.
Returns:
A dict of the following form:
{
'<serial>': {
'serial': '<serial>',
'adb_status': str,
'usb_status': bool,
'blacklisted': bool,
# only if the device is connected and not blacklisted
'type': ro.build.product,
'build': ro.build.id,
'build_detail': ro.build.fingerprint,
'battery': {
...
},
'imei_slice': str,
'wifi_ip': str,
},
...
}
"""
adb_devices = {
a[0].GetDeviceSerial(): a
for a in adb_wrapper.AdbWrapper.Devices(desired_state=None, long_list=True)
}
usb_devices = set(lsusb.get_android_devices())
def blacklisting_device_status(device):
serial = device.adb.GetDeviceSerial()
adb_status = (
adb_devices[serial][1] if serial in adb_devices
else 'missing')
usb_status = bool(serial in usb_devices)
device_status = {
'serial': serial,
'adb_status': adb_status,
'usb_status': usb_status,
}
if not _IsBlacklisted(serial, blacklist):
if adb_status == 'device':
try:
build_product = device.build_product
build_id = device.build_id
build_fingerprint = device.GetProp('ro.build.fingerprint', cache=True)
wifi_ip = device.GetProp('dhcp.wlan0.ipaddress')
battery_info = _BatteryStatus(device, blacklist)
imei_slice = _IMEISlice(device)
if (device.product_name == 'mantaray' and
battery_info.get('AC powered', None) != 'true'):
logging.error('Mantaray device not connected to AC power.')
device_status.update({
'ro.build.product': build_product,
'ro.build.id': build_id,
'ro.build.fingerprint': build_fingerprint,
'battery': battery_info,
'imei_slice': imei_slice,
'wifi_ip': wifi_ip,
# TODO(jbudorick): Remove these once no clients depend on them.
'type': build_product,
'build': build_id,
'build_detail': build_fingerprint,
})
except device_errors.CommandFailedError:
logging.exception('Failure while getting device status for %s.',
str(device))
if blacklist:
blacklist.Extend([serial], reason='status_check_failure')
except device_errors.CommandTimeoutError:
logging.exception('Timeout while getting device status for %s.',
str(device))
if blacklist:
blacklist.Extend([serial], reason='status_check_timeout')
elif blacklist:
blacklist.Extend([serial],
reason=adb_status if usb_status else 'offline')
device_status['blacklisted'] = _IsBlacklisted(serial, blacklist)
return device_status
parallel_devices = device_utils.DeviceUtils.parallel(devices)
statuses = parallel_devices.pMap(blacklisting_device_status).pGet(None)
return statuses
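# Hedged usage sketch for DeviceStatus (not part of the original script):
# the device construction mirrors what main() does further below, and the
# serial number is purely illustrative.
#
#   devices = [device_utils.DeviceUtils('0123456789ABCDEF')]
#   for status in DeviceStatus(devices, blacklist=None):
#     logging.info('%s: adb=%s usb=%s', status['serial'],
#                  status['adb_status'], status['usb_status'])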
def RecoverDevices(devices, blacklist):
"""Attempts to recover any inoperable devices in the provided list.
Args:
devices: The list of devices to attempt to recover.
blacklist: The current device blacklist, which will be used then
reset.
Returns:
Nothing.
"""
statuses = DeviceStatus(devices, blacklist)
should_restart_usb = set(
status['serial'] for status in statuses
if (not status['usb_status']
or status['adb_status'] in ('offline', 'missing')))
should_restart_adb = should_restart_usb.union(set(
status['serial'] for status in statuses
if status['adb_status'] == 'unauthorized'))
should_reboot_device = should_restart_adb.union(set(
status['serial'] for status in statuses
if status['blacklisted']))
logging.debug('Should restart USB for:')
for d in should_restart_usb:
logging.debug(' %s', d)
logging.debug('Should restart ADB for:')
for d in should_restart_adb:
logging.debug(' %s', d)
logging.debug('Should reboot:')
for d in should_reboot_device:
logging.debug(' %s', d)
if blacklist:
blacklist.Reset()
if should_restart_adb:
KillAllAdb()
for serial in should_restart_usb:
try:
reset_usb.reset_android_usb(serial)
except IOError:
logging.exception('Unable to reset USB for %s.', serial)
if blacklist:
blacklist.Extend([serial], reason='usb_failure')
except device_errors.DeviceUnreachableError:
logging.exception('Unable to reset USB for %s.', serial)
if blacklist:
blacklist.Extend([serial], reason='offline')
def blacklisting_recovery(device):
if _IsBlacklisted(device.adb.GetDeviceSerial(), blacklist):
logging.debug('%s is blacklisted, skipping recovery.', str(device))
return
if str(device) in should_reboot_device:
try:
device.WaitUntilFullyBooted(retries=0)
return
except (device_errors.CommandTimeoutError,
device_errors.CommandFailedError):
logging.exception('Failure while waiting for %s. '
'Attempting to recover.', str(device))
try:
try:
device.Reboot(block=False, timeout=5, retries=0)
except device_errors.CommandTimeoutError:
        logging.warning('Timed out while attempting to reboot %s normally. '
'Attempting alternative reboot.', str(device))
# The device drops offline before we can grab the exit code, so
# we don't check for status.
device.adb.Root()
device.adb.Shell('echo b > /proc/sysrq-trigger', expect_status=None,
timeout=5, retries=0)
except device_errors.CommandFailedError:
logging.exception('Failed to reboot %s.', str(device))
if blacklist:
blacklist.Extend([device.adb.GetDeviceSerial()],
reason='reboot_failure')
except device_errors.CommandTimeoutError:
logging.exception('Timed out while rebooting %s.', str(device))
if blacklist:
blacklist.Extend([device.adb.GetDeviceSerial()],
reason='reboot_timeout')
try:
device.WaitUntilFullyBooted(retries=0)
except device_errors.CommandFailedError:
logging.exception('Failure while waiting for %s.', str(device))
if blacklist:
blacklist.Extend([device.adb.GetDeviceSerial()],
reason='reboot_failure')
except device_errors.CommandTimeoutError:
logging.exception('Timed out while waiting for %s.', str(device))
if blacklist:
blacklist.Extend([device.adb.GetDeviceSerial()],
reason='reboot_timeout')
device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_recovery)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--out-dir',
help='Directory where the device path is stored',
default=os.path.join(host_paths.DIR_SOURCE_ROOT, 'out'))
parser.add_argument('--restart-usb', action='store_true',
help='DEPRECATED. '
'This script now always tries to reset USB.')
parser.add_argument('--json-output',
help='Output JSON information into a specified file.')
parser.add_argument('--adb-path',
help='Absolute path to the adb binary to use.')
parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
parser.add_argument('--known-devices-file', action='append', default=[],
dest='known_devices_files',
help='Path to known device lists.')
parser.add_argument('-v', '--verbose', action='count', default=1,
help='Log more information.')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose)
devil_custom_deps = None
if args.adb_path:
devil_custom_deps = {
'adb': {
devil_env.GetPlatform(): [args.adb_path],
},
}
devil_chromium.Initialize(custom_deps=devil_custom_deps)
blacklist = (device_blacklist.Blacklist(args.blacklist_file)
if args.blacklist_file
else None)
last_devices_path = os.path.join(
args.out_dir, device_list.LAST_DEVICES_FILENAME)
args.known_devices_files.append(last_devices_path)
expected_devices = set()
try:
for path in args.known_devices_files:
if os.path.exists(path):
expected_devices.update(device_list.GetPersistentDeviceList(path))
except IOError:
logging.warning('Problem reading %s, skipping.', path)
logging.info('Expected devices:')
for device in expected_devices:
logging.info(' %s', device)
usb_devices = set(lsusb.get_android_devices())
devices = [device_utils.DeviceUtils(s)
for s in expected_devices.union(usb_devices)]
RecoverDevices(devices, blacklist)
statuses = DeviceStatus(devices, blacklist)
# Log the state of all devices.
for status in statuses:
logging.info(status['serial'])
adb_status = status.get('adb_status')
blacklisted = status.get('blacklisted')
logging.info(' USB status: %s',
'online' if status.get('usb_status') else 'offline')
logging.info(' ADB status: %s', adb_status)
logging.info(' Blacklisted: %s', str(blacklisted))
if adb_status == 'device' and not blacklisted:
logging.info(' Device type: %s', status.get('ro.build.product'))
logging.info(' OS build: %s', status.get('ro.build.id'))
logging.info(' OS build fingerprint: %s',
status.get('ro.build.fingerprint'))
logging.info(' Battery state:')
for k, v in status.get('battery', {}).iteritems():
logging.info(' %s: %s', k, v)
logging.info(' IMEI slice: %s', status.get('imei_slice'))
logging.info(' WiFi IP: %s', status.get('wifi_ip'))
# Update the last devices file(s).
for path in args.known_devices_files:
device_list.WritePersistentDeviceList(
path, [status['serial'] for status in statuses])
# Write device info to file for buildbot info display.
if os.path.exists('/home/chrome-bot'):
with open('/home/chrome-bot/.adb_device_info', 'w') as f:
for status in statuses:
try:
if status['adb_status'] == 'device':
f.write('{serial} {adb_status} {build_product} {build_id} '
'{temperature:.1f}C {level}%\n'.format(
serial=status['serial'],
adb_status=status['adb_status'],
build_product=status['type'],
build_id=status['build'],
temperature=float(status['battery']['temperature']) / 10,
level=status['battery']['level']
))
elif status.get('usb_status', False):
f.write('{serial} {adb_status}\n'.format(
serial=status['serial'],
adb_status=status['adb_status']
))
else:
f.write('{serial} offline\n'.format(
serial=status['serial']
))
except Exception: # pylint: disable=broad-except
pass
# Dump the device statuses to JSON.
if args.json_output:
with open(args.json_output, 'wb') as f:
f.write(json.dumps(statuses, indent=4))
live_devices = [status['serial'] for status in statuses
if (status['adb_status'] == 'device'
and not _IsBlacklisted(status['serial'], blacklist))]
# If all devices failed, or if there are no devices, it's an infra error.
return 0 if live_devices else exit_codes.INFRA
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
RonnyPfannschmidt/pytest | testing/test_faulthandler.py | 3 | 5123 | import io
import sys
import pytest
from _pytest.pytester import Pytester
def test_enabled(pytester: Pytester) -> None:
"""Test single crashing test displays a traceback."""
pytester.makepyfile(
"""
import faulthandler
def test_crash():
faulthandler._sigabrt()
"""
)
result = pytester.runpytest_subprocess()
result.stderr.fnmatch_lines(["*Fatal Python error*"])
assert result.ret != 0
def setup_crashing_test(pytester: Pytester) -> None:
pytester.makepyfile(
"""
import faulthandler
import atexit
def test_ok():
atexit.register(faulthandler._sigabrt)
"""
)
def test_crash_during_shutdown_captured(pytester: Pytester) -> None:
"""
Re-enable faulthandler if pytest encountered it enabled during configure.
We should be able to then see crashes during interpreter shutdown.
"""
setup_crashing_test(pytester)
args = (sys.executable, "-Xfaulthandler", "-mpytest")
result = pytester.run(*args)
result.stderr.fnmatch_lines(["*Fatal Python error*"])
assert result.ret != 0
def test_crash_during_shutdown_not_captured(pytester: Pytester) -> None:
"""
Check that pytest leaves faulthandler disabled if it was not enabled during configure.
This prevents us from seeing crashes during interpreter shutdown (see #8260).
"""
setup_crashing_test(pytester)
args = (sys.executable, "-mpytest")
result = pytester.run(*args)
result.stderr.no_fnmatch_line("*Fatal Python error*")
assert result.ret != 0
def test_disabled(pytester: Pytester) -> None:
"""Test option to disable fault handler in the command line."""
pytester.makepyfile(
"""
import faulthandler
def test_disabled():
assert not faulthandler.is_enabled()
"""
)
result = pytester.runpytest_subprocess("-p", "no:faulthandler")
result.stdout.fnmatch_lines(["*1 passed*"])
assert result.ret == 0
@pytest.mark.parametrize(
"enabled",
[
pytest.param(
True, marks=pytest.mark.skip(reason="sometimes crashes on CI (#7022)")
),
False,
],
)
def test_timeout(pytester: Pytester, enabled: bool) -> None:
"""Test option to dump tracebacks after a certain timeout.
If faulthandler is disabled, no traceback will be dumped.
"""
pytester.makepyfile(
"""
import os, time
def test_timeout():
time.sleep(1 if "CI" in os.environ else 0.1)
"""
)
pytester.makeini(
"""
[pytest]
faulthandler_timeout = 0.01
"""
)
args = ["-p", "no:faulthandler"] if not enabled else []
result = pytester.runpytest_subprocess(*args)
tb_output = "most recent call first"
if enabled:
result.stderr.fnmatch_lines(["*%s*" % tb_output])
else:
assert tb_output not in result.stderr.str()
result.stdout.fnmatch_lines(["*1 passed*"])
assert result.ret == 0
@pytest.mark.parametrize("hook_name", ["pytest_enter_pdb", "pytest_exception_interact"])
def test_cancel_timeout_on_hook(monkeypatch, hook_name) -> None:
"""Make sure that we are cancelling any scheduled traceback dumping due
to timeout before entering pdb (pytest-dev/pytest-faulthandler#12) or any
other interactive exception (pytest-dev/pytest-faulthandler#14)."""
import faulthandler
from _pytest import faulthandler as faulthandler_plugin
called = []
monkeypatch.setattr(
faulthandler, "cancel_dump_traceback_later", lambda: called.append(1)
)
# call our hook explicitly, we can trust that pytest will call the hook
# for us at the appropriate moment
hook_func = getattr(faulthandler_plugin, hook_name)
hook_func()
assert called == [1]
def test_already_initialized_crash(pytester: Pytester) -> None:
"""Even if faulthandler is already initialized, we still dump tracebacks on crashes (#8258)."""
pytester.makepyfile(
"""
def test():
import faulthandler
faulthandler._sigabrt()
"""
)
result = pytester.run(
sys.executable,
"-X",
"faulthandler",
"-mpytest",
pytester.path,
)
result.stderr.fnmatch_lines(["*Fatal Python error*"])
assert result.ret != 0
def test_get_stderr_fileno_invalid_fd() -> None:
"""Test for faulthandler being able to handle invalid file descriptors for stderr (#8249)."""
from _pytest.faulthandler import get_stderr_fileno
class StdErrWrapper(io.StringIO):
"""
Mimic ``twisted.logger.LoggingFile`` to simulate returning an invalid file descriptor.
https://github.com/twisted/twisted/blob/twisted-20.3.0/src/twisted/logger/_io.py#L132-L139
"""
def fileno(self):
return -1
wrapper = StdErrWrapper()
with pytest.MonkeyPatch.context() as mp:
mp.setattr("sys.stderr", wrapper)
# Even when the stderr wrapper signals an invalid file descriptor,
# ``_get_stderr_fileno()`` should return the real one.
assert get_stderr_fileno() == 2
| mit |
shiftcontrol/UnityOpenCV | opencv/tests/swig_python/highgui/match.py | 3 | 1348 | """
This script compares two images and decides, based on a threshold,
whether the two images are "equal enough"
"""
# import the necessary things for OpenCV
from cv import *
from highgui import *
import frames
import sys
import os
PREFIX=os.path.join(os.environ["srcdir"],"../../opencv_extra/testdata/python/images/")
DisplayImages=False
if DisplayImages:
videowindow="video"
referencewindow="reference"
cvNamedWindow(videowindow,CV_WINDOW_AUTOSIZE)
cvNamedWindow(referencewindow,CV_WINDOW_AUTOSIZE)
# Returns True on a match, False otherwise (relative L2 difference vs. threshold)
def match( image, index, thres ):
# load image from comparison set
QCIFcompare=cvLoadImage(PREFIX+frames.QCIF[index])
if QCIFcompare is None:
print "Couldn't open image "+PREFIX+frames.QCIF[index]+" for comparison!"
sys.exit(1)
# resize comparison image to input image dimensions
size=cvSize(image.width,image.height)
compare=cvCreateImage(size,IPL_DEPTH_8U,image.nChannels)
cvResize(QCIFcompare,compare)
# compare images
diff=cvNorm( image, compare, CV_RELATIVE_L2 )
if DisplayImages:
cvShowImage(videowindow,image)
cvShowImage(referencewindow,compare)
if diff<=thres:
cvWaitKey(200)
else:
print "index==",index,": max==",thres," is==",diff
cvWaitKey(5000)
cvReleaseImage(QCIFcompare)
cvReleaseImage(compare)
if diff<=thres:
return True
else:
return False
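# Hedged usage sketch (not part of the original test harness): compare a
# freshly loaded frame against reference frame 0 with a 2% relative-L2
# threshold. The file name is illustrative.
#
#   image = cvLoadImage("some_frame.png")
#   if match(image, 0, 0.02):
#     print "frames match"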
| gpl-3.0 |
klaus385/openpilot | selfdrive/crash.py | 2 | 1230 | """Install exception handler for process crash."""
import os
import sys
from selfdrive.version import version, dirty
from selfdrive.swaglog import cloudlog
if os.getenv("NOLOG") or os.getenv("NOCRASH"):
def capture_exception(*exc_info):
pass
def bind_user(**kwargs):
pass
def bind_extra(**kwargs):
pass
def install():
pass
else:
from raven import Client
from raven.transport.http import HTTPTransport
client = Client('https://1994756b5e6f41cf939a4c65de45f4f2:[email protected]/77924',
install_sys_hook=False, transport=HTTPTransport, release=version, tags={'dirty': dirty})
def capture_exception(*args, **kwargs):
client.captureException(*args, **kwargs)
cloudlog.error("crash", exc_info=kwargs.get('exc_info', 1))
def bind_user(**kwargs):
client.user_context(kwargs)
def bind_extra(**kwargs):
client.extra_context(kwargs)
def install():
# installs a sys.excepthook
__excepthook__ = sys.excepthook
def handle_exception(*exc_info):
if exc_info[0] not in (KeyboardInterrupt, SystemExit):
capture_exception(exc_info=exc_info)
__excepthook__(*exc_info)
sys.excepthook = handle_exception
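# Hedged usage sketch (the attributes passed to bind_user are illustrative;
# keyword arguments are simply forwarded to raven's context helpers):
#
#   from selfdrive import crash
#   crash.install()              # route uncaught exceptions to Sentry
#   crash.bind_user(id="device-1234")
#   try:
#     risky_operation()          # hypothetical helper
#   except Exception:
#     crash.capture_exception()  # report, then keep running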
| mit |
Azure/azure-sdk-for-python | sdk/powerbiembedded/azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/operation_py3.py | 1 | 1163 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Operation(Model):
"""Operation.
:param name: The name of the operation being performed on this particular
object. This name should match the action name that appears in RBAC / the
event service.
:type name: str
:param display:
:type display: ~azure.mgmt.powerbiembedded.models.Display
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'Display'},
}
def __init__(self, *, name: str=None, display=None, **kwargs) -> None:
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
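# Hedged usage sketch (the operation name below is illustrative, not an
# actual Power BI Embedded action):
#
#   op = Operation(name='Microsoft.PowerBIEmbedded/workspaceCollections/read')
#   print(op.name)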
| mit |
Anderson0026/mapproxy | mapproxy/script/conf/app.py | 1 | 6606 | # -:- encoding: utf-8 -:-
# This file is part of the MapProxy project.
# Copyright (C) 2013 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import sys
import os
import optparse
import logging
import textwrap
import datetime
import xml.etree.ElementTree
import yaml
from contextlib import contextmanager
from cStringIO import StringIO
from .sources import sources
from .layers import layers
from .caches import caches
from .seeds import seeds
from .utils import update_config, MapProxyYAMLDumper, download_capabilities
from mapproxy.config.loader import load_configuration
from mapproxy.util.ext.wmsparse import parse_capabilities
def setup_logging(level=logging.INFO):
mapproxy_log = logging.getLogger('mapproxy')
mapproxy_log.setLevel(level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"[%(asctime)s] %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
mapproxy_log.addHandler(ch)
def write_header(f, capabilities):
print >>f, '# MapProxy configuration automatically generated from:'
print >>f, '# %s' % capabilities
print >>f, '#'
print >>f, '# NOTE: The generated configuration can be highly inefficient,'
print >>f, '# especially when multiple layers and caches are requested at once.'
print >>f, '# Make sure you understand the generated configuration!'
print >>f, '#'
print >>f, '# Created on %s with:' % datetime.datetime.now()
print >>f, ' \\\n'.join(textwrap.wrap(' '.join(sys.argv), initial_indent='# ', subsequent_indent='# '))
print >>f, ''
@contextmanager
def file_or_stdout(name):
if name == '-':
yield sys.stdout
else:
with open(name, 'wb') as f:
yield f
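# Hedged usage sketch (mirrors how config_command writes its output below):
# passing '-' targets stdout, anything else opens a real file.
#
#   with file_or_stdout('-') as f:
#       f.write('# goes to stdout\n')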
def config_command(args):
parser = optparse.OptionParser("usage: %prog autoconfig [options]")
parser.add_option('--capabilities',
help="URL or filename of WMS 1.1.1/1.3.0 capabilities document")
parser.add_option('--output', help="filename for created MapProxy config [default: -]", default="-")
parser.add_option('--output-seed', help="filename for created seeding config")
parser.add_option('--base', help='base config to include in created MapProxy config')
parser.add_option('--overwrite',
help='YAML file with overwrites for the created MapProxy config')
parser.add_option('--overwrite-seed',
help='YAML file with overwrites for the created seeding config')
parser.add_option('--force', default=False, action='store_true',
help="overwrite existing files")
options, args = parser.parse_args(args)
if not options.capabilities:
parser.print_help()
print >>sys.stderr, "\nERROR: --capabilities required"
return 2
if not options.output and not options.output_seed:
parser.print_help()
print >>sys.stderr, "\nERROR: --output and/or --output-seed required"
return 2
if not options.force:
if options.output and options.output != '-' and os.path.exists(options.output):
print >>sys.stderr, "\nERROR: %s already exists, use --force to overwrite" % options.output
return 2
if options.output_seed and options.output_seed != '-' and os.path.exists(options.output_seed):
print >>sys.stderr, "\nERROR: %s already exists, use --force to overwrite" % options.output_seed
return 2
log = logging.getLogger('mapproxy_conf_cmd')
log.addHandler(logging.StreamHandler())
setup_logging(logging.WARNING)
srs_grids = {}
if options.base:
base = load_configuration(options.base)
for name, grid_conf in base.grids.iteritems():
if name.startswith('GLOBAL_'):
continue
srs_grids[grid_conf.tile_grid().srs.srs_code] = name
cap_doc = options.capabilities
if cap_doc.startswith(('http://', 'https://')):
cap_doc = download_capabilities(options.capabilities).read()
else:
cap_doc = open(cap_doc, 'rb').read()
try:
cap = parse_capabilities(StringIO(cap_doc))
except (xml.etree.ElementTree.ParseError, ValueError), ex:
print >>sys.stderr, ex
print >>sys.stderr, cap_doc[:1000] + ('...' if len(cap_doc) > 1000 else '')
return 3
overwrite = None
if options.overwrite:
with open(options.overwrite, 'rb') as f:
overwrite = yaml.load(f)
overwrite_seed = None
if options.overwrite_seed:
with open(options.overwrite_seed, 'rb') as f:
overwrite_seed = yaml.load(f)
conf = {}
if options.base:
conf['base'] = os.path.abspath(options.base)
conf['services'] = {'wms': {'md': {'title': cap.metadata()['title']}}}
if overwrite:
conf['services'] = update_config(conf['services'], overwrite.pop('service', {}))
conf['sources'] = sources(cap)
if overwrite:
conf['sources'] = update_config(conf['sources'], overwrite.pop('sources', {}))
conf['caches'] = caches(cap, conf['sources'], srs_grids=srs_grids)
if overwrite:
conf['caches'] = update_config(conf['caches'], overwrite.pop('caches', {}))
conf['layers'] = layers(cap, conf['caches'])
if overwrite:
conf['layers'] = update_config(conf['layers'], overwrite.pop('layers', {}))
if overwrite:
conf = update_config(conf, overwrite)
seed_conf = {}
seed_conf['seeds'], seed_conf['cleanups'] = seeds(cap, conf['caches'])
if overwrite_seed:
seed_conf = update_config(seed_conf, overwrite_seed)
if options.output:
with file_or_stdout(options.output) as f:
write_header(f, options.capabilities)
yaml.dump(conf, f, default_flow_style=False, Dumper=MapProxyYAMLDumper)
if options.output_seed:
with file_or_stdout(options.output_seed) as f:
write_header(f, options.capabilities)
yaml.dump(seed_conf, f, default_flow_style=False, Dumper=MapProxyYAMLDumper)
    return 0
| apache-2.0 |
franciscod/python-telegram-bot | telegram/inlinequeryresultvideo.py | 2 | 2581 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram
InlineQueryResultVideo"""
from telegram import InlineQueryResult, InlineKeyboardMarkup, InputMessageContent
class InlineQueryResultVideo(InlineQueryResult):
def __init__(self,
id,
video_url,
mime_type,
thumb_url,
title,
caption=None,
video_width=None,
video_height=None,
video_duration=None,
description=None,
reply_markup=None,
input_message_content=None,
**kwargs):
# Required
super(InlineQueryResultVideo, self).__init__('video', id)
self.video_url = video_url
self.mime_type = mime_type
self.thumb_url = thumb_url
self.title = title
# Optional
if caption:
self.caption = caption
if video_width:
self.video_width = video_width
if video_height:
self.video_height = video_height
if video_duration:
self.video_duration = video_duration
if description:
self.description = description
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
@staticmethod
def de_json(data):
data = super(InlineQueryResultVideo, InlineQueryResultVideo).de_json(data)
data['reply_markup'] = InlineKeyboardMarkup.de_json(data.get('reply_markup'))
data['input_message_content'] = InputMessageContent.de_json(data.get(
'input_message_content'))
return InlineQueryResultVideo(**data)
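# Hedged usage sketch (field values are illustrative; only the required
# positional fields of the constructor are shown):
#
#   result = InlineQueryResultVideo(
#       id='1',
#       video_url='https://example.com/video.mp4',
#       mime_type='video/mp4',
#       thumb_url='https://example.com/thumb.jpg',
#       title='A video')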
| gpl-2.0 |
shingonoide/odoo | addons/purchase/report/purchase_report.py | 50 | 7689 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# Please note that these reports are not multi-currency !!!
#
from openerp.osv import fields,osv
from openerp import tools
class purchase_report(osv.osv):
_name = "purchase.report"
_description = "Purchases Orders"
_auto = False
_columns = {
'date': fields.datetime('Order Date', readonly=True, help="Date on which this document has been created"), # TDE FIXME master: rename into date_order
'state': fields.selection([('draft', 'Request for Quotation'),
('confirmed', 'Waiting Supplier Ack'),
('approved', 'Approved'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')],'Order Status', readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'picking_type_id': fields.many2one('stock.warehouse', 'Warehouse', readonly=True),
'location_id': fields.many2one('stock.location', 'Destination', readonly=True),
'partner_id':fields.many2one('res.partner', 'Supplier', readonly=True),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', readonly=True),
'date_approve':fields.date('Date Approved', readonly=True),
'expected_date':fields.date('Expected Date', readonly=True),
'validator' : fields.many2one('res.users', 'Validated By', readonly=True),
'product_uom' : fields.many2one('product.uom', 'Reference Unit of Measure', required=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'user_id':fields.many2one('res.users', 'Responsible', readonly=True),
'delay':fields.float('Days to Validate', digits=(16,2), readonly=True),
'delay_pass':fields.float('Days to Deliver', digits=(16,2), readonly=True),
'quantity': fields.integer('Unit Quantity', readonly=True), # TDE FIXME master: rename into unit_quantity
'price_total': fields.float('Total Price', readonly=True),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'negociation': fields.float('Purchase-Standard Price', readonly=True, group_operator="avg"),
'price_standard': fields.float('Products Value', readonly=True, group_operator="sum"),
'nbr': fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines
'category_id': fields.many2one('product.category', 'Category', readonly=True)
}
_order = 'date desc, price_total desc'
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'purchase_report')
cr.execute("""
create or replace view purchase_report as (
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
select
min(l.id) as id,
s.date_order as date,
l.state,
s.date_approve,
s.minimum_planned_date as expected_date,
s.dest_address_id,
s.pricelist_id,
s.validator,
spt.warehouse_id as picking_type_id,
s.partner_id as partner_id,
s.create_uid as user_id,
s.company_id as company_id,
l.product_id,
t.categ_id as category_id,
t.uom_id as product_uom,
s.location_id as location_id,
sum(l.product_qty/u.factor*u2.factor) as quantity,
extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
count(*) as nbr,
sum(l.price_unit/cr.rate*l.product_qty)::decimal(16,2) as price_total,
avg(100.0 * (l.price_unit/cr.rate*l.product_qty) / NULLIF(ip.value_float*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
sum(ip.value_float*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
(sum(l.product_qty*l.price_unit/cr.rate)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
from purchase_order_line l
join purchase_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
LEFT JOIN ir_property ip ON (ip.name='standard_price' AND ip.res_id=CONCAT('product.template,',t.id) AND ip.company_id=s.company_id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
left join stock_picking_type spt on (spt.id=s.picking_type_id)
join currency_rate cr on (cr.currency_id = s.currency_id and
cr.date_start <= coalesce(s.date_order, now()) and
(cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
group by
s.company_id,
s.create_uid,
s.partner_id,
u.factor,
s.location_id,
l.price_unit,
s.date_approve,
l.date_planned,
l.product_uom,
s.minimum_planned_date,
s.pricelist_id,
s.validator,
s.dest_address_id,
l.product_id,
t.categ_id,
s.date_order,
l.state,
spt.warehouse_id,
u.uom_type,
u.category_id,
t.uom_id,
u.id,
u2.factor
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
2ndQuadrant/ansible | test/runner/lib/docker_util.py | 29 | 7331 | """Functions for accessing docker via the docker cli."""
from __future__ import absolute_import, print_function
import json
import os
import time
from lib.executor import (
SubprocessError,
)
from lib.util import (
ApplicationError,
run_command,
common_environment,
display,
find_executable,
)
from lib.config import (
EnvironmentConfig,
)
BUFFER_SIZE = 256 * 256
def docker_available():
"""
:rtype: bool
"""
return find_executable('docker', required=False)
def get_docker_container_id():
"""
:rtype: str | None
"""
path = '/proc/self/cgroup'
if not os.path.exists(path):
return None
with open(path) as cgroup_fd:
contents = cgroup_fd.read()
paths = [line.split(':')[2] for line in contents.splitlines()]
container_ids = set(path.split('/')[2] for path in paths if path.startswith('/docker/'))
if not container_ids:
return None
if len(container_ids) == 1:
return container_ids.pop()
raise ApplicationError('Found multiple container_id candidates: %s\n%s' % (sorted(container_ids), contents))
def get_docker_container_ip(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
:rtype: str
"""
results = docker_inspect(args, container_id)
ipaddress = results[0]['NetworkSettings']['IPAddress']
return ipaddress
def get_docker_networks(args, container_id):
"""
:param args: EnvironmentConfig
:param container_id: str
:rtype: list[str]
"""
results = docker_inspect(args, container_id)
networks = sorted(results[0]['NetworkSettings']['Networks'])
return networks
def docker_pull(args, image):
"""
:type args: EnvironmentConfig
:type image: str
"""
if ('@' in image or ':' in image) and docker_images(args, image):
display.info('Skipping docker pull of existing image with tag or digest: %s' % image, verbosity=2)
return
if not args.docker_pull:
display.warning('Skipping docker pull for "%s". Image may be out-of-date.' % image)
return
for _ in range(1, 10):
try:
docker_command(args, ['pull', image])
return
except SubprocessError:
display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to pull docker image "%s".' % image)
def docker_put(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(src, 'rb') as src_fd:
docker_exec(args, container_id, ['dd', 'of=%s' % dst, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdin=src_fd, capture=True)
def docker_get(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(dst, 'wb') as dst_fd:
docker_exec(args, container_id, ['dd', 'if=%s' % src, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdout=dst_fd, capture=True)
def docker_run(args, image, options, cmd=None):
"""
:type args: EnvironmentConfig
:type image: str
:type options: list[str] | None
:type cmd: list[str] | None
:rtype: str | None, str | None
"""
if not options:
options = []
if not cmd:
cmd = []
for _ in range(1, 3):
try:
return docker_command(args, ['run'] + options + [image] + cmd, capture=True)
except SubprocessError as ex:
display.error(ex)
display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to run docker image "%s".' % image)
def docker_images(args, image):
"""
:param args: CommonConfig
:param image: str
:rtype: list[dict[str, any]]
"""
stdout, _dummy = docker_command(args, ['images', image, '--format', '{{json .}}'], capture=True, always=True)
results = [json.loads(line) for line in stdout.splitlines()]
return results
def docker_rm(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
"""
docker_command(args, ['rm', '-f', container_id], capture=True)
def docker_inspect(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['inspect', container_id], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except Exception:
raise ex
def docker_network_disconnect(args, container_id, network):
"""
:param args: EnvironmentConfig
:param container_id: str
:param network: str
"""
docker_command(args, ['network', 'disconnect', network, container_id], capture=True)
def docker_network_inspect(args, network):
"""
:type args: EnvironmentConfig
:type network: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['network', 'inspect', network], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except Exception:
raise ex
def docker_exec(args, container_id, cmd, options=None, capture=False, stdin=None, stdout=None):
"""
:type args: EnvironmentConfig
:type container_id: str
:type cmd: list[str]
:type options: list[str] | None
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:rtype: str | None, str | None
"""
if not options:
options = []
return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout)
def docker_info(args):
"""
:param args: CommonConfig
:rtype: dict[str, any]
"""
stdout, _dummy = docker_command(args, ['info', '--format', '{{json .}}'], capture=True, always=True)
return json.loads(stdout)
def docker_version(args):
"""
:param args: CommonConfig
:rtype: dict[str, any]
"""
stdout, _dummy = docker_command(args, ['version', '--format', '{{json .}}'], capture=True, always=True)
return json.loads(stdout)
def docker_command(args, cmd, capture=False, stdin=None, stdout=None, always=False):
"""
:type args: CommonConfig
:type cmd: list[str]
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:type always: bool
:rtype: str | None, str | None
"""
env = docker_environment()
return run_command(args, ['docker'] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, always=always)
def docker_environment():
"""
:rtype: dict[str, str]
"""
env = common_environment()
env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_')))
return env
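# Hedged usage sketch (not part of the module): `args` stands in for a
# parsed EnvironmentConfig from the surrounding test runner, so its exact
# construction is assumed here.
#
#   if docker_available():
#       print(docker_version(args).get('Server', {}))
#       docker_pull(args, 'ubuntu:16.04')
#       stdout, _ = docker_run(args, 'ubuntu:16.04', ['-d'], ['sleep', '60'])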
| gpl-3.0 |
huongttlan/statsmodels | statsmodels/sandbox/examples/example_sysreg.py | 31 | 8043 | """Example: statsmodels.sandbox.sysreg
"""
#TODO: this is going to change significantly once we have a panel data structure
from statsmodels.compat.python import lmap, asbytes
import numpy as np
import statsmodels.api as sm
from statsmodels.sandbox.sysreg import *
#for Python 3 compatibility
# Seemingly Unrelated Regressions (SUR) Model
# This example uses the subset of the Grunfeld data in Greene's Econometric
# Analysis Chapter 14 (5th Edition)
grun_data = sm.datasets.grunfeld.load()
firms = ['General Motors', 'Chrysler', 'General Electric', 'Westinghouse',
'US Steel']
#for Python 3 compatibility
firms = lmap(asbytes, firms)
grun_exog = grun_data.exog
grun_endog = grun_data.endog
# Right now SUR takes a list of arrays.
# The list alternates between the LHS of an equation and the RHS of an
# equation.
# This is very likely to change
grun_sys = []
for i in firms:
index = grun_exog['firm'] == i
grun_sys.append(grun_endog[index])
exog = grun_exog[index][['value','capital']].view(float).reshape(-1,2)
exog = sm.add_constant(exog, prepend=True)
grun_sys.append(exog)
# Note that the results in Greene (5th edition) uses a slightly different
# version of the Grunfeld data. To reproduce Table 14.1 the following changes
# are necessary.
grun_sys[-2][5] = 261.6
grun_sys[-2][-3] = 645.2
grun_sys[-1][11,2] = 232.6
grun_mod = SUR(grun_sys)
grun_res = grun_mod.fit()
print("Results for the 2-step GLS")
print("Compare to Greene Table 14.1, 5th edition")
print(grun_res.params)
# or you can do an iterative fit
# you have to define a new model though this will be fixed
# TODO: note the above
print("Results for iterative GLS (equivalent to MLE)")
print("Compare to Greene Table 14.3")
#TODO: these are slightly off, could be a convergence issue
# or might use a different default DOF correction?
grun_imod = SUR(grun_sys)
grun_ires = grun_imod.fit(igls=True)
print(grun_ires.params)
# Two-Stage Least Squares for Simultaneous Equations
#TODO: we are going to need *some kind* of formula framework
# This follows the simple macroeconomic model given in
# Greene Example 15.1 (5th Edition)
# The data however is from statsmodels and is not the same as
# Greene's
# The model is
# consumption: c_{t} = \alpha_{0} + \alpha_{1}y_{t} + \alpha_{2}c_{t-1} + \epsilon_{t1}
# investment: i_{t} = \beta_{0} + \beta_{1}r_{t} + \beta_{2}\left(y_{t}-y_{t-1}\right) + \epsilon_{t2}
# demand: y_{t} = c_{t} + I_{t} + g_{t}
# See Greene's Econometric Analysis for more information
# Load the data
macrodata = sm.datasets.macrodata.load().data
# Not needed, but make sure the data is sorted
macrodata = np.sort(macrodata, order=['year','quarter'])
# Impose the demand restriction
y = macrodata['realcons'] + macrodata['realinv'] + macrodata['realgovt']
# Build the system
macro_sys = []
# First equation LHS
macro_sys.append(macrodata['realcons'][1:]) # leave off first date
# First equation RHS
exog1 = np.column_stack((y[1:],macrodata['realcons'][:-1]))
#TODO: it might be nice to have "lag" and "lead" functions
exog1 = sm.add_constant(exog1, prepend=True)
macro_sys.append(exog1)
# Second equation LHS
macro_sys.append(macrodata['realinv'][1:])
# Second equation RHS
exog2 = np.column_stack((macrodata['tbilrate'][1:], np.diff(y)))
exog2 = sm.add_constant(exog2, prepend=True)
macro_sys.append(exog2)
# We need to say that y_{t} in the RHS of equation 1 is an endogenous regressor
# We will call these independent endogenous variables
# Right now, we use a dictionary to declare these
indep_endog = {0 : [1]}
# We also need to create a design of our instruments
# This will be done automatically in the future
instruments = np.column_stack((macrodata[['realgovt',
'tbilrate']][1:].view(float).reshape(-1,2),macrodata['realcons'][:-1],
y[:-1]))
instruments = sm.add_constant(instruments, prepend=True)
macro_mod = Sem2SLS(macro_sys, indep_endog=indep_endog, instruments=instruments)
# Right now this only returns parameters
macro_params = macro_mod.fit()
print("The parameters for the first equation are correct.")
print("The parameters for the second equation are not.")
print(macro_params)
#TODO: Note that the above is incorrect, because we have no way of telling the
# model that *part* of the y_{t} - y_{t-1} is an independent endogenous variable
# To correct for this we would have to do the following
y_instrumented = macro_mod.wexog[0][:,1]
whitened_ydiff = y_instrumented - y[:-1]
wexog = np.column_stack((macrodata['tbilrate'][1:],whitened_ydiff))
wexog = sm.add_constant(wexog, prepend=True)
correct_params = sm.GLS(macrodata['realinv'][1:], wexog).fit().params
print("If we correctly instrument everything, then these are the parameters")
print("for the second equation")
print(correct_params)
print("Compare to output of R script statsmodels/sandbox/tests/macrodata.s")
print('\nUsing IV2SLS')
from statsmodels.sandbox.regression.gmm import IV2SLS
miv = IV2SLS(macro_sys[0], macro_sys[1], instruments)
resiv = miv.fit()
print("equation 1")
print(resiv.params)
miv2 = IV2SLS(macro_sys[2], macro_sys[3], instruments)
resiv2 = miv2.fit()
print("equation 2")
print(resiv2.params)
### Below is the same example using Greene's data ###
run_greene = 0
if run_greene:
try:
data3 = np.genfromtxt('/home/skipper/school/MetricsII/Greene \
TableF5-1.txt', names=True)
except:
raise ValueError("Based on Greene TableF5-1. You should download it "
"from his web site and edit this script accordingly.")
# Example 15.1 in Greene 5th Edition
# c_t = constant + y_t + c_t-1
# i_t = constant + r_t + (y_t - y_t-1)
# y_t = c_t + i_t + g_t
sys3 = []
sys3.append(data3['realcons'][1:]) # have to leave off a beg. date
# impose 3rd equation on y
y = data3['realcons'] + data3['realinvs'] + data3['realgovt']
exog1 = np.column_stack((y[1:],data3['realcons'][:-1]))
exog1 = sm.add_constant(exog1, prepend=False)
sys3.append(exog1)
sys3.append(data3['realinvs'][1:])
exog2 = np.column_stack((data3['tbilrate'][1:],
np.diff(y)))
# realint is missing 1st observation
exog2 = sm.add_constant(exog2, prepend=False)
sys3.append(exog2)
indep_endog = {0 : [0]} # need to be able to say that y_1 is an instrument..
instruments = np.column_stack((data3[['realgovt',
'tbilrate']][1:].view(float).reshape(-1,2),data3['realcons'][:-1],
y[:-1]))
instruments = sm.add_constant(instruments, prepend=False)
sem_mod = Sem2SLS(sys3, indep_endog = indep_endog, instruments=instruments)
sem_params = sem_mod.fit() # first equation is right, but not second?
# should y_t in the diff be instrumented?
# how would R know this in the script?
# well, let's check...
y_instr = sem_mod.wexog[0][:,0]
wyd = y_instr - y[:-1]
wexog = np.column_stack((data3['tbilrate'][1:],wyd))
wexog = sm.add_constant(wexog, prepend=False)
params = sm.GLS(data3['realinvs'][1:], wexog).fit().params
print("These are the simultaneous equation estimates for Greene's \
example 13-1 (Also application 13-1 in 6th edition.")
print(sem_params)
print("The first set of parameters is correct. The second set is not.")
print("Compare to the solution manual at \
http://pages.stern.nyu.edu/~wgreene/Text/econometricanalysis.htm")
print("The reason is the restriction on (y_t - y_1)")
print("Compare to R script GreeneEx15_1.s")
print("Somehow R carries y.1 in yd to know that it needs to be \
instrumented")
print("If we replace our estimate with the instrumented one")
print(params)
print("We get the right estimate")
print("Without a formula framework we have to be able to do restrictions.")
# yep!, but how in the world does R know this when we just fed it yd??
# must be implicit in the formula framework...
# we are going to need to keep the two equations separate and use
# a restrictions matrix. Ugh, is a formula framework really, necessary to get
# around this?
| bsd-3-clause |
ossdemura/django-miniblog | Lib/site-packages/pip/_vendor/html5lib/treeadapters/sax.py | 1835 | 1661 | from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
if prefix is not None:
prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"],
unadjustForeignAttributes)
handler.startElementNS((token["namespace"], token["name"]),
token["name"],
attrs)
if type == "EmptyTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type == "EndTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
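# Hedged usage sketch: feed an html5lib treewalker into a no-op SAX handler.
# ContentHandler's methods are all no-ops, so this just exercises the
# adapter; swap in a real handler to produce output.
#
#   import html5lib
#   from xml.sax.handler import ContentHandler
#
#   tree = html5lib.parse('<p>hello</p>')
#   walker = html5lib.getTreeWalker('etree')
#   to_sax(walker(tree), ContentHandler())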
| mit |
freakynit/kaggle-ndsb | configurations/bagging_15_convroll4_big_weightdecay_resume.py | 6 | 5502 | import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import dihedral_fast
import tmp_dnn
import tta
resume_path = "metadata/bagging_15_convroll4_big_weightdecay-schaap-20150306-105118.pkl"
validation_split_path = "splits/bagging_split_15.pkl"
patch_size = (95, 95)
augmentation_params = {
'zoom_range': (1 / 1.6, 1.6),
'rotation_range': (0, 360),
'shear_range': (-20, 20),
'translation_range': (-10, 10),
'do_flip': True,
'allow_stretch': 1.3,
}
batch_size = 128 // 4
chunk_size = 32768 // 4
num_chunks_train = 840
momentum = 0.9
learning_rate_schedule = {
0: 0.003,
700: 0.0003,
800: 0.00003,
}
validate_every = 20
save_every = 20
def estimate_scale(img):
return np.maximum(img.shape[0], img.shape[1]) / 85.0
# augmentation_transforms_test = []
# for flip in [True, False]:
# for zoom in [1/1.3, 1/1.2, 1/1.1, 1.0, 1.1, 1.2, 1.3]:
# for rot in np.linspace(0.0, 360.0, 5, endpoint=False):
# tf = data.build_augmentation_transform(zoom=(zoom, zoom), rotation=rot, flip=flip)
# augmentation_transforms_test.append(tf)
augmentation_transforms_test = tta.build_quasirandom_transforms(70, **{
'zoom_range': (1 / 1.4, 1.4),
'rotation_range': (0, 360),
'shear_range': (-10, 10),
'translation_range': (-8, 8),
'do_flip': True,
'allow_stretch': 1.2,
})
data_loader = load.ZmuvRescaledDataLoader(estimate_scale=estimate_scale, num_chunks_train=num_chunks_train,
patch_size=patch_size, chunk_size=chunk_size, augmentation_params=augmentation_params,
augmentation_transforms_test=augmentation_transforms_test, validation_split_path=validation_split_path)
# Conv2DLayer = nn.layers.cuda_convnet.Conv2DCCLayer
# MaxPool2DLayer = nn.layers.cuda_convnet.MaxPool2DCCLayer
Conv2DLayer = tmp_dnn.Conv2DDNNLayer
MaxPool2DLayer = tmp_dnn.MaxPool2DDNNLayer
def build_model():
l0 = nn.layers.InputLayer((batch_size, 1, patch_size[0], patch_size[1]))
l0c = dihedral.CyclicSliceLayer(l0)
l1a = Conv2DLayer(l0c, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l1b = Conv2DLayer(l1a, num_filters=16, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l1 = MaxPool2DLayer(l1b, ds=(3, 3), strides=(2, 2))
l1r = dihedral_fast.CyclicConvRollLayer(l1)
l2a = Conv2DLayer(l1r, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l2b = Conv2DLayer(l2a, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l2 = MaxPool2DLayer(l2b, ds=(3, 3), strides=(2, 2))
l2r = dihedral_fast.CyclicConvRollLayer(l2)
l3a = Conv2DLayer(l2r, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3b = Conv2DLayer(l3a, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3c = Conv2DLayer(l3b, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3 = MaxPool2DLayer(l3c, ds=(3, 3), strides=(2, 2))
l3r = dihedral_fast.CyclicConvRollLayer(l3)
l4a = Conv2DLayer(l3r, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4b = Conv2DLayer(l4a, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4c = Conv2DLayer(l4b, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4 = MaxPool2DLayer(l4c, ds=(3, 3), strides=(2, 2))
l4r = dihedral_fast.CyclicConvRollLayer(l4)
l4f = nn.layers.flatten(l4r)
l5 = nn.layers.DenseLayer(nn.layers.dropout(l4f, p=0.5), num_units=512, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
l5r = dihedral_fast.CyclicRollLayer(l5)
l6 = nn.layers.DenseLayer(nn.layers.dropout(l5r, p=0.5), num_units=512, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
l6m = dihedral.CyclicPoolLayer(l6, pool_function=nn_plankton.rms)
l7 = nn.layers.DenseLayer(nn.layers.dropout(l6m, p=0.5), num_units=data.num_classes, nonlinearity=T.nnet.softmax, W=nn_plankton.Orthogonal(1.0))
return [l0], l7
def build_objective(l_ins, l_out):
lambda_reg = 0.0005
params = nn.layers.get_all_non_bias_params(l_out)
reg_term = sum(T.sum(p**2) for p in params)
def loss(y, t):
return nn_plankton.log_loss(y, t) + lambda_reg * reg_term
return nn.objectives.Objective(l_out, loss_function=loss)
| mit |
stefan-jonasson/home-assistant | homeassistant/components/telegram_bot/__init__.py | 2 | 26538 | """
Component to send and receive Telegram messages.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/telegram_bot/
"""
import asyncio
import io
from functools import partial
import logging
import os
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA, ATTR_MESSAGE, ATTR_TITLE)
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
ATTR_COMMAND, ATTR_LATITUDE, ATTR_LONGITUDE, CONF_API_KEY,
CONF_PLATFORM, CONF_TIMEOUT, HTTP_DIGEST_AUTHENTICATION)
import homeassistant.helpers.config_validation as cv
from homeassistant.exceptions import TemplateError
from homeassistant.setup import async_prepare_setup_platform
REQUIREMENTS = ['python-telegram-bot==8.1.1']
_LOGGER = logging.getLogger(__name__)
ATTR_ARGS = 'args'
ATTR_AUTHENTICATION = 'authentication'
ATTR_CALLBACK_QUERY = 'callback_query'
ATTR_CALLBACK_QUERY_ID = 'callback_query_id'
ATTR_CAPTION = 'caption'
ATTR_CHAT_ID = 'chat_id'
ATTR_CHAT_INSTANCE = 'chat_instance'
ATTR_DISABLE_NOTIF = 'disable_notification'
ATTR_DISABLE_WEB_PREV = 'disable_web_page_preview'
ATTR_EDITED_MSG = 'edited_message'
ATTR_FILE = 'file'
ATTR_FROM_FIRST = 'from_first'
ATTR_FROM_LAST = 'from_last'
ATTR_KEYBOARD = 'keyboard'
ATTR_KEYBOARD_INLINE = 'inline_keyboard'
ATTR_MESSAGEID = 'message_id'
ATTR_MSG = 'message'
ATTR_MSGID = 'id'
ATTR_PARSER = 'parse_mode'
ATTR_PASSWORD = 'password'
ATTR_REPLY_TO_MSGID = 'reply_to_message_id'
ATTR_REPLYMARKUP = 'reply_markup'
ATTR_SHOW_ALERT = 'show_alert'
ATTR_TARGET = 'target'
ATTR_TEXT = 'text'
ATTR_URL = 'url'
ATTR_USER_ID = 'user_id'
ATTR_USERNAME = 'username'
CONF_ALLOWED_CHAT_IDS = 'allowed_chat_ids'
CONF_PROXY_URL = 'proxy_url'
CONF_PROXY_PARAMS = 'proxy_params'
DOMAIN = 'telegram_bot'
SERVICE_SEND_MESSAGE = 'send_message'
SERVICE_SEND_PHOTO = 'send_photo'
SERVICE_SEND_DOCUMENT = 'send_document'
SERVICE_SEND_LOCATION = 'send_location'
SERVICE_EDIT_MESSAGE = 'edit_message'
SERVICE_EDIT_CAPTION = 'edit_caption'
SERVICE_EDIT_REPLYMARKUP = 'edit_replymarkup'
SERVICE_ANSWER_CALLBACK_QUERY = 'answer_callback_query'
SERVICE_DELETE_MESSAGE = 'delete_message'
EVENT_TELEGRAM_CALLBACK = 'telegram_callback'
EVENT_TELEGRAM_COMMAND = 'telegram_command'
EVENT_TELEGRAM_TEXT = 'telegram_text'
PARSER_HTML = 'html'
PARSER_MD = 'markdown'
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
vol.Required(CONF_PLATFORM): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_ALLOWED_CHAT_IDS):
vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(ATTR_PARSER, default=PARSER_MD): cv.string,
vol.Optional(CONF_PROXY_URL): cv.string,
vol.Optional(CONF_PROXY_PARAMS): dict,
})
BASE_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(ATTR_PARSER): cv.string,
vol.Optional(ATTR_DISABLE_NOTIF): cv.boolean,
vol.Optional(ATTR_DISABLE_WEB_PREV): cv.boolean,
vol.Optional(ATTR_KEYBOARD): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_KEYBOARD_INLINE): cv.ensure_list,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_SEND_MESSAGE = BASE_SERVICE_SCHEMA.extend({
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_TITLE): cv.template,
})
SERVICE_SCHEMA_SEND_FILE = BASE_SERVICE_SCHEMA.extend({
vol.Optional(ATTR_URL): cv.template,
vol.Optional(ATTR_FILE): cv.template,
vol.Optional(ATTR_CAPTION): cv.template,
vol.Optional(ATTR_USERNAME): cv.string,
vol.Optional(ATTR_PASSWORD): cv.string,
vol.Optional(ATTR_AUTHENTICATION): cv.string,
})
SERVICE_SCHEMA_SEND_LOCATION = BASE_SERVICE_SCHEMA.extend({
vol.Required(ATTR_LONGITUDE): cv.template,
vol.Required(ATTR_LATITUDE): cv.template,
})
SERVICE_SCHEMA_EDIT_MESSAGE = SERVICE_SCHEMA_SEND_MESSAGE.extend({
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
})
SERVICE_SCHEMA_EDIT_CAPTION = vol.Schema({
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_CAPTION): cv.template,
vol.Optional(ATTR_KEYBOARD_INLINE): cv.ensure_list,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_EDIT_REPLYMARKUP = vol.Schema({
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_KEYBOARD_INLINE): cv.ensure_list,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_ANSWER_CALLBACK_QUERY = vol.Schema({
vol.Required(ATTR_MESSAGE): cv.template,
vol.Required(ATTR_CALLBACK_QUERY_ID): vol.Coerce(int),
vol.Optional(ATTR_SHOW_ALERT): cv.boolean,
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_DELETE_MESSAGE = vol.Schema({
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_MESSAGEID):
vol.Any(cv.positive_int, vol.All(cv.string, 'last')),
}, extra=vol.ALLOW_EXTRA)
SERVICE_MAP = {
SERVICE_SEND_MESSAGE: SERVICE_SCHEMA_SEND_MESSAGE,
SERVICE_SEND_PHOTO: SERVICE_SCHEMA_SEND_FILE,
SERVICE_SEND_DOCUMENT: SERVICE_SCHEMA_SEND_FILE,
SERVICE_SEND_LOCATION: SERVICE_SCHEMA_SEND_LOCATION,
SERVICE_EDIT_MESSAGE: SERVICE_SCHEMA_EDIT_MESSAGE,
SERVICE_EDIT_CAPTION: SERVICE_SCHEMA_EDIT_CAPTION,
SERVICE_EDIT_REPLYMARKUP: SERVICE_SCHEMA_EDIT_REPLYMARKUP,
SERVICE_ANSWER_CALLBACK_QUERY: SERVICE_SCHEMA_ANSWER_CALLBACK_QUERY,
SERVICE_DELETE_MESSAGE: SERVICE_SCHEMA_DELETE_MESSAGE,
}
def load_data(hass, url=None, filepath=None, username=None, password=None,
authentication=None, num_retries=5):
"""Load photo/document into ByteIO/File container from a source."""
try:
if url is not None:
# Load photo from URL
params = {"timeout": 15}
if username is not None and password is not None:
if authentication == HTTP_DIGEST_AUTHENTICATION:
params["auth"] = HTTPDigestAuth(username, password)
else:
params["auth"] = HTTPBasicAuth(username, password)
retry_num = 0
while retry_num < num_retries:
req = requests.get(url, **params)
if not req.ok:
_LOGGER.warning("Status code %s (retry #%s) loading %s.",
req.status_code, retry_num + 1, url)
else:
data = io.BytesIO(req.content)
if data.read():
data.seek(0)
data.name = url
return data
_LOGGER.warning("Empty data (retry #%s) in %s).",
retry_num + 1, url)
retry_num += 1
_LOGGER.warning("Can't load photo in %s after %s retries.",
url, retry_num)
elif filepath is not None:
if hass.config.is_allowed_path(filepath):
return open(filepath, "rb")
_LOGGER.warning("'%s' are not secure to load data from!", filepath)
else:
_LOGGER.warning("Can't load photo. No photo found in params!")
except (OSError, TypeError) as error:
_LOGGER.error("Can't load photo into ByteIO: %s", error)
return None
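# Illustrative usage sketch (added for clarity; not part of the original
# module -- the URL and credentials below are hypothetical). load_data()
# returns an io.BytesIO for URLs, an open file object for whitelisted local
# paths, or None on failure:
#
#   file_content = load_data(
#       hass, url='https://example.org/snapshot.jpg',
#       username='viewer', password='secret',
#       authentication=HTTP_DIGEST_AUTHENTICATION)
#   if file_content:
#       bot.sendPhoto(chat_id, file_content)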
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the Telegram bot component."""
if not config[DOMAIN]:
return False
p_config = config[DOMAIN][0]
descriptions = yield from hass.async_add_job(
load_yaml_config_file,
os.path.join(os.path.dirname(__file__), 'services.yaml'))
p_type = p_config.get(CONF_PLATFORM)
platform = yield from async_prepare_setup_platform(
hass, config, DOMAIN, p_type)
if platform is None:
return
_LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
try:
receiver_service = yield from \
platform.async_setup_platform(hass, p_config)
if receiver_service is False:
_LOGGER.error(
"Failed to initialize Telegram bot %s", p_type)
return False
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error setting up platform %s", p_type)
return False
notify_service = TelegramNotificationService(
hass,
p_config.get(CONF_API_KEY),
p_config.get(CONF_ALLOWED_CHAT_IDS),
p_config.get(ATTR_PARSER),
p_config.get(CONF_PROXY_URL),
p_config.get(CONF_PROXY_PARAMS)
)
@asyncio.coroutine
def async_send_telegram_message(service):
"""Handle sending Telegram Bot message service calls."""
def _render_template_attr(data, attribute):
attribute_templ = data.get(attribute)
if attribute_templ:
                if isinstance(attribute_templ, (float, int, str)):
data[attribute] = attribute_templ
else:
attribute_templ.hass = hass
try:
data[attribute] = attribute_templ.async_render()
except TemplateError as exc:
_LOGGER.error(
"TemplateError in %s: %s -> %s",
attribute, attribute_templ.template, exc)
data[attribute] = attribute_templ.template
msgtype = service.service
kwargs = dict(service.data)
for attribute in [ATTR_MESSAGE, ATTR_TITLE, ATTR_URL, ATTR_FILE,
ATTR_CAPTION, ATTR_LONGITUDE, ATTR_LATITUDE]:
_render_template_attr(kwargs, attribute)
_LOGGER.debug("New telegram message %s: %s", msgtype, kwargs)
if msgtype == SERVICE_SEND_MESSAGE:
yield from hass.async_add_job(
partial(notify_service.send_message, **kwargs))
elif msgtype == SERVICE_SEND_PHOTO:
yield from hass.async_add_job(
partial(notify_service.send_file, True, **kwargs))
elif msgtype == SERVICE_SEND_DOCUMENT:
yield from hass.async_add_job(
partial(notify_service.send_file, False, **kwargs))
elif msgtype == SERVICE_SEND_LOCATION:
yield from hass.async_add_job(
partial(notify_service.send_location, **kwargs))
elif msgtype == SERVICE_ANSWER_CALLBACK_QUERY:
yield from hass.async_add_job(
partial(notify_service.answer_callback_query, **kwargs))
elif msgtype == SERVICE_DELETE_MESSAGE:
yield from hass.async_add_job(
partial(notify_service.delete_message, **kwargs))
else:
yield from hass.async_add_job(
partial(notify_service.edit_message, msgtype, **kwargs))
# Register notification services
for service_notif, schema in SERVICE_MAP.items():
hass.services.async_register(
DOMAIN, service_notif, async_send_telegram_message,
descriptions.get(service_notif), schema=schema)
return True
class TelegramNotificationService:
"""Implement the notification services for the Telegram Bot domain."""
def __init__(self, hass, api_key, allowed_chat_ids, parser,
proxy_url=None, proxy_params=None):
"""Initialize the service."""
from telegram import Bot
from telegram.parsemode import ParseMode
from telegram.utils.request import Request
self.allowed_chat_ids = allowed_chat_ids
self._default_user = self.allowed_chat_ids[0]
self._last_message_id = {user: None for user in self.allowed_chat_ids}
self._parsers = {PARSER_HTML: ParseMode.HTML,
PARSER_MD: ParseMode.MARKDOWN}
self._parse_mode = self._parsers.get(parser)
request = None
if proxy_url is not None:
request = Request(proxy_url=proxy_url,
urllib3_proxy_kwargs=proxy_params)
self.bot = Bot(token=api_key, request=request)
self.hass = hass
def _get_msg_ids(self, msg_data, chat_id):
"""Get the message id to edit.
This can be one of (message_id, inline_message_id) from a msg dict,
returning a tuple.
**You can use 'last' as message_id** to edit
        the last sent message in the chat_id.
"""
message_id = inline_message_id = None
if ATTR_MESSAGEID in msg_data:
message_id = msg_data[ATTR_MESSAGEID]
if (isinstance(message_id, str) and (message_id == 'last') and
(self._last_message_id[chat_id] is not None)):
message_id = self._last_message_id[chat_id]
else:
inline_message_id = msg_data['inline_message_id']
return message_id, inline_message_id
def _get_target_chat_ids(self, target):
"""Validate chat_id targets or return default target (first).
:param target: optional list of integers ([12234, -12345])
:return list of chat_id targets (integers)
"""
if target is not None:
if isinstance(target, int):
target = [target]
chat_ids = [t for t in target if t in self.allowed_chat_ids]
if chat_ids:
return chat_ids
_LOGGER.warning("Unallowed targets: %s, using default: %s",
target, self._default_user)
return [self._default_user]
def _get_msg_kwargs(self, data):
"""Get parameters in message data kwargs."""
def _make_row_inline_keyboard(row_keyboard):
"""Make a list of InlineKeyboardButtons.
It can accept:
- a list of tuples like:
`[(text_b1, data_callback_b1),
                (text_b2, data_callback_b2), ...]`
- a string like: `/cmd1, /cmd2, /cmd3`
- or a string like: `text_b1:/cmd1, text_b2:/cmd2`
"""
from telegram import InlineKeyboardButton
buttons = []
if isinstance(row_keyboard, str):
for key in row_keyboard.split(","):
if ':/' in key:
# commands like: 'Label:/cmd' become ('Label', '/cmd')
label = key.split(':/')[0]
command = key[len(label) + 1:]
buttons.append(
InlineKeyboardButton(label, callback_data=command))
else:
# commands like: '/cmd' become ('CMD', '/cmd')
label = key.strip()[1:].upper()
buttons.append(
InlineKeyboardButton(label, callback_data=key))
elif isinstance(row_keyboard, list):
for entry in row_keyboard:
text_btn, data_btn = entry
buttons.append(
InlineKeyboardButton(text_btn, callback_data=data_btn))
else:
raise ValueError(str(row_keyboard))
return buttons
# Defaults
params = {
ATTR_PARSER: self._parse_mode,
ATTR_DISABLE_NOTIF: False,
ATTR_DISABLE_WEB_PREV: None,
ATTR_REPLY_TO_MSGID: None,
ATTR_REPLYMARKUP: None,
CONF_TIMEOUT: None
}
if data is not None:
if ATTR_PARSER in data:
params[ATTR_PARSER] = self._parsers.get(
data[ATTR_PARSER], self._parse_mode)
if CONF_TIMEOUT in data:
params[CONF_TIMEOUT] = data[CONF_TIMEOUT]
if ATTR_DISABLE_NOTIF in data:
params[ATTR_DISABLE_NOTIF] = data[ATTR_DISABLE_NOTIF]
if ATTR_DISABLE_WEB_PREV in data:
params[ATTR_DISABLE_WEB_PREV] = data[ATTR_DISABLE_WEB_PREV]
if ATTR_REPLY_TO_MSGID in data:
params[ATTR_REPLY_TO_MSGID] = data[ATTR_REPLY_TO_MSGID]
# Keyboards:
if ATTR_KEYBOARD in data:
from telegram import ReplyKeyboardMarkup
keys = data.get(ATTR_KEYBOARD)
keys = keys if isinstance(keys, list) else [keys]
params[ATTR_REPLYMARKUP] = ReplyKeyboardMarkup(
[[key.strip() for key in row.split(",")] for row in keys])
elif ATTR_KEYBOARD_INLINE in data:
from telegram import InlineKeyboardMarkup
keys = data.get(ATTR_KEYBOARD_INLINE)
keys = keys if isinstance(keys, list) else [keys]
params[ATTR_REPLYMARKUP] = InlineKeyboardMarkup(
[_make_row_inline_keyboard(row) for row in keys])
return params
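    # Illustrative sketch (hypothetical service data, not from the original
    # source): a call such as
    #   self._get_msg_kwargs({ATTR_KEYBOARD_INLINE: ['On:/on,Off:/off']})
    # returns the defaults above plus params[ATTR_REPLYMARKUP] set to an
    # InlineKeyboardMarkup with one row of two buttons mapping the labels
    # 'On' and 'Off' to the callback commands '/on' and '/off'.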
def _send_msg(self, func_send, msg_error, *args_msg, **kwargs_msg):
"""Send one message."""
from telegram.error import TelegramError
try:
out = func_send(*args_msg, **kwargs_msg)
if not isinstance(out, bool) and hasattr(out, ATTR_MESSAGEID):
chat_id = out.chat_id
self._last_message_id[chat_id] = out[ATTR_MESSAGEID]
_LOGGER.debug("Last message ID: %s (from chat_id %s)",
self._last_message_id, chat_id)
elif not isinstance(out, bool):
_LOGGER.warning("Update last message: out_type:%s, out=%s",
type(out), out)
return out
except TelegramError as exc:
_LOGGER.error("%s: %s. Args: %s, kwargs: %s",
msg_error, exc, args_msg, kwargs_msg)
def send_message(self, message="", target=None, **kwargs):
"""Send a message to one or multiple pre-allowed chat IDs."""
title = kwargs.get(ATTR_TITLE)
text = '{}\n{}'.format(title, message) if title else message
params = self._get_msg_kwargs(kwargs)
for chat_id in self._get_target_chat_ids(target):
_LOGGER.debug("Send message in chat ID %s with params: %s",
chat_id, params)
self._send_msg(self.bot.sendMessage,
"Error sending message",
chat_id, text, **params)
def delete_message(self, chat_id=None, **kwargs):
"""Delete a previously sent message."""
chat_id = self._get_target_chat_ids(chat_id)[0]
message_id, _ = self._get_msg_ids(kwargs, chat_id)
_LOGGER.debug("Delete message %s in chat ID %s", message_id, chat_id)
deleted = self._send_msg(self.bot.deleteMessage,
"Error deleting message",
chat_id, message_id)
# reduce message_id anyway:
if self._last_message_id[chat_id] is not None:
# change last msg_id for deque(n_msgs)?
self._last_message_id[chat_id] -= 1
return deleted
def edit_message(self, type_edit, chat_id=None, **kwargs):
"""Edit a previously sent message."""
chat_id = self._get_target_chat_ids(chat_id)[0]
message_id, inline_message_id = self._get_msg_ids(kwargs, chat_id)
params = self._get_msg_kwargs(kwargs)
_LOGGER.debug("Edit message %s in chat ID %s with params: %s",
message_id or inline_message_id, chat_id, params)
if type_edit == SERVICE_EDIT_MESSAGE:
message = kwargs.get(ATTR_MESSAGE)
title = kwargs.get(ATTR_TITLE)
text = '{}\n{}'.format(title, message) if title else message
_LOGGER.debug("Editing message with ID %s.",
message_id or inline_message_id)
return self._send_msg(self.bot.editMessageText,
"Error editing text message",
text, chat_id=chat_id, message_id=message_id,
inline_message_id=inline_message_id,
**params)
elif type_edit == SERVICE_EDIT_CAPTION:
func_send = self.bot.editMessageCaption
params[ATTR_CAPTION] = kwargs.get(ATTR_CAPTION)
else:
func_send = self.bot.editMessageReplyMarkup
return self._send_msg(func_send,
"Error editing message attributes",
chat_id=chat_id, message_id=message_id,
inline_message_id=inline_message_id,
**params)
def answer_callback_query(self, message, callback_query_id,
show_alert=False, **kwargs):
"""Answer a callback originated with a press in an inline keyboard."""
params = self._get_msg_kwargs(kwargs)
_LOGGER.debug("Answer callback query with callback ID %s: %s, "
"alert: %s.", callback_query_id, message, show_alert)
self._send_msg(self.bot.answerCallbackQuery,
"Error sending answer callback query",
callback_query_id,
text=message, show_alert=show_alert, **params)
def send_file(self, is_photo=True, target=None, **kwargs):
"""Send a photo or a document."""
params = self._get_msg_kwargs(kwargs)
caption = kwargs.get(ATTR_CAPTION)
func_send = self.bot.sendPhoto if is_photo else self.bot.sendDocument
file_content = load_data(
self.hass,
url=kwargs.get(ATTR_URL),
filepath=kwargs.get(ATTR_FILE),
username=kwargs.get(ATTR_USERNAME),
password=kwargs.get(ATTR_PASSWORD),
authentication=kwargs.get(ATTR_AUTHENTICATION),
)
if file_content:
for chat_id in self._get_target_chat_ids(target):
_LOGGER.debug("Send file to chat ID %s. Caption: %s.",
chat_id, caption)
self._send_msg(func_send, "Error sending file",
chat_id, file_content,
caption=caption, **params)
file_content.seek(0)
else:
_LOGGER.error("Can't send file with kwargs: %s", kwargs)
def send_location(self, latitude, longitude, target=None, **kwargs):
"""Send a location."""
latitude = float(latitude)
longitude = float(longitude)
params = self._get_msg_kwargs(kwargs)
for chat_id in self._get_target_chat_ids(target):
_LOGGER.debug("Send location %s/%s to chat ID %s.",
latitude, longitude, chat_id)
self._send_msg(self.bot.sendLocation,
"Error sending location",
chat_id=chat_id,
latitude=latitude, longitude=longitude, **params)
class BaseTelegramBotEntity:
"""The base class for the telegram bot."""
def __init__(self, hass, allowed_chat_ids):
"""Initialize the bot base class."""
self.allowed_chat_ids = allowed_chat_ids
self.hass = hass
def _get_message_data(self, msg_data):
"""Return boolean msg_data_is_ok and dict msg_data."""
if not msg_data:
return False, None
bad_fields = ('text' not in msg_data and
'data' not in msg_data and
'chat' not in msg_data)
if bad_fields or 'from' not in msg_data:
# Message is not correct.
_LOGGER.error("Incoming message does not have required data (%s)",
msg_data)
return False, None
if (msg_data['from'].get('id') not in self.allowed_chat_ids or
('chat' in msg_data and
msg_data['chat'].get('id') not in self.allowed_chat_ids)):
# Origin is not allowed.
_LOGGER.error("Incoming message is not allowed (%s)", msg_data)
return True, None
data = {
ATTR_USER_ID: msg_data['from']['id'],
ATTR_FROM_FIRST: msg_data['from']['first_name']
}
if 'last_name' in msg_data['from']:
data[ATTR_FROM_LAST] = msg_data['from']['last_name']
if 'chat' in msg_data:
data[ATTR_CHAT_ID] = msg_data['chat']['id']
elif ATTR_MESSAGE in msg_data and 'chat' in msg_data[ATTR_MESSAGE]:
data[ATTR_CHAT_ID] = msg_data[ATTR_MESSAGE]['chat']['id']
return True, data
def process_message(self, data):
"""Check for basic message rules and fire an event if message is ok."""
if ATTR_MSG in data or ATTR_EDITED_MSG in data:
event = EVENT_TELEGRAM_COMMAND
if ATTR_MSG in data:
data = data.get(ATTR_MSG)
else:
data = data.get(ATTR_EDITED_MSG)
message_ok, event_data = self._get_message_data(data)
if event_data is None:
return message_ok
if 'text' in data:
                if data['text'].startswith('/'):
pieces = data['text'].split(' ')
event_data[ATTR_COMMAND] = pieces[0]
event_data[ATTR_ARGS] = pieces[1:]
else:
event_data[ATTR_TEXT] = data['text']
event = EVENT_TELEGRAM_TEXT
else:
_LOGGER.warning("Message without text data received: %s", data)
event_data[ATTR_TEXT] = str(data)
event = EVENT_TELEGRAM_TEXT
self.hass.bus.async_fire(event, event_data)
return True
elif ATTR_CALLBACK_QUERY in data:
event = EVENT_TELEGRAM_CALLBACK
data = data.get(ATTR_CALLBACK_QUERY)
message_ok, event_data = self._get_message_data(data)
if event_data is None:
return message_ok
event_data[ATTR_DATA] = data[ATTR_DATA]
event_data[ATTR_MSG] = data[ATTR_MSG]
event_data[ATTR_CHAT_INSTANCE] = data[ATTR_CHAT_INSTANCE]
event_data[ATTR_MSGID] = data[ATTR_MSGID]
self.hass.bus.async_fire(event, event_data)
return True
else:
_LOGGER.warning("Message with unknown data received: %s", data)
return True
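# Illustrative event-flow sketch (hypothetical payload): an incoming update
#   {'message': {'from': {'id': 12345, 'first_name': 'Alice'},
#                'chat': {'id': 12345}, 'text': '/lights on'}}
# passed to process_message() fires EVENT_TELEGRAM_COMMAND with event data
#   {'user_id': 12345, 'from_first': 'Alice', 'chat_id': 12345,
#    'command': '/lights', 'args': ['on']}
# provided 12345 is one of the allowed_chat_ids.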
| mit |
grangier/django-11599 | django/utils/version.py | 320 | 1361 | import django
import os.path
import re
def get_svn_revision(path=None):
"""
Returns the SVN revision in the form SVN-XXXX,
where XXXX is the revision number.
Returns SVN-unknown if anything goes wrong, such as an unexpected
format of internal SVN files.
If path is provided, it should be a directory whose SVN info you want to
inspect. If it's not provided, this will use the root django/ package
directory.
"""
rev = None
if path is None:
path = django.__path__[0]
entries_path = '%s/.svn/entries' % path
try:
entries = open(entries_path, 'r').read()
except IOError:
pass
else:
# Versions >= 7 of the entries file are flat text. The first line is
# the version number. The next set of digits after 'dir' is the revision.
        if re.match(r'(\d+)', entries):
            rev_match = re.search(r'\d+\s+dir\s+(\d+)', entries)
if rev_match:
rev = rev_match.groups()[0]
# Older XML versions of the file specify revision as an attribute of
# the first entries node.
else:
from xml.dom import minidom
dom = minidom.parse(entries_path)
rev = dom.getElementsByTagName('entry')[0].getAttribute('revision')
if rev:
return u'SVN-%s' % rev
return u'SVN-unknown'
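# Illustrative usage (the revision number shown is hypothetical):
#   >>> get_svn_revision()
#   u'SVN-13750'
# and u'SVN-unknown' whenever .svn/entries is missing or unparseable.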
| bsd-3-clause |
zaxtax/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 47 | 2486 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
CydarLtd/ansible | lib/ansible/module_utils/connection.py | 61 | 2929 | #
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import socket
import struct
import signal
from ansible.module_utils.basic import get_exception
from ansible.module_utils._text import to_bytes, to_native
def send_data(s, data):
packed_len = struct.pack('!Q',len(data))
return s.sendall(packed_len + data)
def recv_data(s):
header_len = 8 # size of a packed unsigned long long
data = to_bytes("")
while len(data) < header_len:
d = s.recv(header_len - len(data))
if not d:
return None
data += d
data_len = struct.unpack('!Q',data[:header_len])[0]
data = data[header_len:]
while len(data) < data_len:
d = s.recv(data_len - len(data))
if not d:
return None
data += d
return data
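# Wire-format sketch (illustrative): every frame is an 8-byte big-endian
# unsigned length header followed by the payload. For example,
# send_data(s, b'EXEC: show version') transmits
# struct.pack('!Q', 18) + b'EXEC: show version', and recv_data() first
# accumulates the 8 header bytes, then loops until all 18 payload bytes
# have arrived.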
def exec_command(module, command):
try:
sf = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sf.connect(module._socket_path)
data = "EXEC: %s" % command
send_data(sf, to_bytes(data.strip()))
rc = int(recv_data(sf), 10)
stdout = recv_data(sf)
stderr = recv_data(sf)
except socket.error:
exc = get_exception()
sf.close()
module.fail_json(msg='unable to connect to socket', err=str(exc))
sf.close()
return (rc, to_native(stdout), to_native(stderr))
| gpl-3.0 |
SonarOpenCommunity/sonar-cxx | cxx-sensors/src/tools/clangsa_createrules.py | 1 | 6838 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SonarQube C++ Community Plugin (cxx plugin)
# Copyright (C) 2010-2021 SonarOpenCommunity
# http://github.com/SonarOpenCommunity/sonar-cxx
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
#
"""
Simple script to generate the rules xml file for SonarQube cxx plugin
from the Clang Static Analyzer checkers.
The clang compiler should be available in the PATH
or output of clang -cc1 -analyzer-checker-help
as input file.
"""
from xml.dom import minidom
import argparse
import re
import subprocess
import sys
import xml.etree.ElementTree as ET
def CDATA(text=None):
element = ET.Element('![CDATA[')
element.text = text
return element
ET._original_serialize_xml = ET._serialize_xml
def _serialize_xml(write, elem, qnames, namespaces,
short_empty_elements, **kwargs):
if elem.tag == '![CDATA[':
write("<%s%s]]>" % (elem.tag, elem.text))
return
return ET._original_serialize_xml(
write, elem, qnames, namespaces, short_empty_elements, **kwargs)
ET._serialize_xml = ET._serialize['xml'] = _serialize_xml
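# Illustrative effect of the CDATA shim above (hypothetical element):
#   elem = ET.Element('description')
#   elem.append(CDATA('<p>Raw HTML kept verbatim</p>'))
#   ET.tostring(elem)
# yields b'<description><![CDATA[<p>Raw HTML kept verbatim</p>]]></description>'.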
def collect_checkers(clangsa_output):
"""
Parse clang static analyzer output.
Return the list of checkers and the description.
"""
checkers_data = {}
# Checker name and description in one line.
pattern = re.compile(r'^\s\s(?P<checker_name>\S*)\s*(?P<description>.*)')
checker_name = None
for line in clangsa_output.splitlines():
line = line.decode(encoding='UTF-8')
if re.match(r'^CHECKERS:', line) or line == '':
continue
elif checker_name and not re.match(r'^\s\s\S', line):
# Collect description for the checker name.
checkers_data[checker_name] = line.strip()
checker_name = None
elif re.match(r'^\s\s\S+$', line.rstrip()):
# Only checker name is in the line.
checker_name = line.strip()
else:
# Checker name and description is in one line.
match = pattern.match(line.rstrip())
if match:
current = match.groupdict()
checkers_data[current['checker_name']] = current['description']
# Filter out debug checkers.
non_debug = {k: v for k, v in checkers_data.items() if 'debug' not in k}
return non_debug
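# Illustrative example (hypothetical clang output): given the input bytes
#   b'CHECKERS:\n  core.DivideZero    Check for division by zero\n'
# collect_checkers() returns
#   {'core.DivideZero': 'Check for division by zero'}
# while any checker whose name contains "debug" is dropped.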
def main():
parser = argparse.ArgumentParser(
description="""Generate the rules xml file for cxx plugin
plugin from the Clang Static Analyzer checkers.
https://clang-analyzer.llvm.org/""",
usage='%(prog)s -o clangsa.xml')
parser.add_argument('-i', '--input', dest='input_file', action='store',
required=False,
help="""Input file to read rules.
If parameter does not exist
it tries to call clang.""")
parser.add_argument('-o', '--output', dest='output_file', action='store',
required=True,
help="""Output file to write the xml rules.
If the file already exists
it will be overwritten.""")
args = parser.parse_args()
clang_version = "clang version ???".encode('utf-8')
if args.input_file:
with open(args.input_file, 'r') as input:
checker_data = collect_checkers(input.read().encode('utf-8'))
else:
try:
            version_cmd = ['clang', '--version']
            version_info = subprocess.run(version_cmd,
                                          stdout=subprocess.PIPE,
                                          check=True).stdout
except subprocess.CalledProcessError as cpe:
sys.exit(cpe.returncode)
# Only the first line is interesting.
clang_version = version_info.splitlines()[0]
try:
clang_checkers = ['clang', '-cc1', '-analyzer-checker-help']
checkers_output = subprocess.run(clang_checkers,
stdout=subprocess.PIPE,
check=True).stdout
print("Collecting clang checkers ...", end='')
checker_data = collect_checkers(checkers_output)
except subprocess.CalledProcessError as cpe:
sys.exit(cpe.returncode)
if not checker_data:
print("No checkers could be processed.")
sys.exit(1)
print(" done.")
print("Generating rules xml ...", end='')
# build a tree structure
rules = ET.Element("rules")
comment = " C and C++ rules for Clang Static Analyzer. " \
"https://clang-analyzer.llvm.org/\n" + \
"Rules list was generated based on " + \
clang_version.decode("utf-8") + " "
rules.append(ET.Comment(comment))
for checker_name, description in checker_data.items():
rule = ET.SubElement(rules, "rule")
key = ET.SubElement(rule, "key")
name = ET.SubElement(rule, "name")
desc = ET.SubElement(rule, "description")
sev = ET.SubElement(rule, "severity")
c_type = ET.SubElement(rule, "type")
key.text = checker_name
name.text = checker_name
sev.text = "MAJOR"
c_type.text = "BUG"
if sev.text != 'INFO':
ET.SubElement(rule, 'remediationFunction').text = 'LINEAR'
ET.SubElement(rule, 'remediationFunctionGapMultiplier').text = '5min'
auto_tag = checker_name.split('.')[0]
tag = ET.SubElement(rule, "tag")
tag.text = auto_tag.lower()
cdata = CDATA('\n<p>' + description.strip() +
'\n</p>\n <h2>References</h2>'
' <p><a href="https://clang-analyzer.llvm.org/"'
' target="_blank">clang-analyzer.llvm.org</a></p> \n')
desc.append(cdata)
xmlstr = minidom.parseString(
ET.tostring(rules, method='xml')).toprettyxml(indent=" ")
print(" done.")
with open(args.output_file, 'w') as out:
out.write(xmlstr)
if __name__ == '__main__':
main()
| lgpl-3.0 |
CingHu/neutron-ustack | neutron/db/migration/alembic_migrations/versions/1d6ee1ae5da5_db_healing.py | 6 | 1060 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Include all tables and make migrations unconditional.
Revision ID: db_healing
Revises: 5446f2a45467
Create Date: 2014-05-29 10:52:43.898980
"""
# revision identifiers, used by Alembic.
revision = 'db_healing'
down_revision = '5446f2a45467'
from neutron.db.migration.alembic_migrations import heal_script
def upgrade(active_plugins=None, options=None):
heal_script.heal()
def downgrade(active_plugins=None, options=None):
pass
| apache-2.0 |
TEAM-Gummy/platform_external_chromium_org | ppapi/generators/generator.py | 165 | 1776 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import traceback
# Note: some of these files are imported to register cmdline options.
from idl_generator import Generator
from idl_option import ParseOptions
from idl_outfile import IDLOutFile
from idl_parser import ParseFiles
from idl_c_header import HGen
from idl_thunk import TGen
from idl_gen_pnacl import PnaclGen
def Main(args):
# If no arguments are provided, assume we are trying to rebuild the
# C headers with warnings off.
try:
if not args:
args = [
'--wnone', '--cgen', '--range=start,end',
'--pnacl', '--pnaclshim',
'../native_client/src/untrusted/pnacl_irt_shim/pnacl_shim.c',
'--tgen',
]
current_dir = os.path.abspath(os.getcwd())
script_dir = os.path.abspath(os.path.dirname(__file__))
if current_dir != script_dir:
print '\nIncorrect CWD, default run skipped.'
print 'When running with no arguments set CWD to the scripts directory:'
print '\t' + script_dir + '\n'
print 'This ensures correct default paths and behavior.\n'
return 1
filenames = ParseOptions(args)
ast = ParseFiles(filenames)
if ast.errors:
print 'Found %d errors. Aborting build.\n' % ast.errors
return 1
return Generator.Run(ast)
except SystemExit, ec:
print 'Exiting with %d' % ec.code
sys.exit(ec.code)
except:
typeinfo, value, tb = sys.exc_info()
traceback.print_exception(typeinfo, value, tb)
print 'Called with: ' + ' '.join(sys.argv)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
pyfidelity/rest-seed | backend/backrest/tests/test_change_password.py | 1 | 1714 | from pytest import fixture, mark
from transaction import commit
@fixture(scope='module')
def url(testing):
return testing.route_url('password-change')
@mark.user('alice')
def test_change_password(browser, url, alice):
data = dict(password='foo!', current='alice')
browser.put_json(url, data)
assert alice.validate_password('foo!')
@mark.user('alice')
def test_change_password_twice(browser, url, alice):
data = dict(password='foo!', current='alice')
browser.put_json(url, data)
assert alice.validate_password('foo!')
commit()
data = dict(password='alice', current='foo!')
browser.put_json(url, data)
alice = alice.query.one() # refetch alice after `commit`
assert alice.validate_password('alice')
@mark.user('alice')
def test_change_password_with_wrong_current_password(browser, url, alice):
data = dict(password='foo!', current='hurz?')
result = browser.put_json(url, data, status=400).json
assert [(e['name'], e['description']) for e in result['errors']] == [
('current', 'Password does not match')]
assert alice.validate_password('alice')
@mark.user('alice')
def test_change_password_without_current_password(browser, url, alice):
data = dict(password='foo!')
result = browser.put_json(url, data, status=400).json
assert [(e['name'], e['description']) for e in result['errors']] == [
('current', 'current is missing')]
assert alice.validate_password('alice')
@mark.user('alice')
def test_set_password_without_existing_password(browser, url, alice):
alice.password = None
data = dict(password='foo!', current=None)
browser.put_json(url, data)
assert alice.validate_password('foo!')
| bsd-2-clause |
gsnbng/erpnext | erpnext/buying/report/subcontracted_item_to_be_received/subcontracted_item_to_be_received.py | 9 | 2552 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
if filters.from_date >= filters.to_date:
frappe.msgprint(_("To Date must be greater than From Date"))
data = []
columns = get_columns()
	get_data(data, filters)
return columns, data
def get_columns():
return [
{
"label": _("Purchase Order"),
"fieldtype": "Link",
"fieldname": "purchase_order",
"options": "Purchase Order",
"width": 150
},
{
"label": _("Date"),
"fieldtype": "Date",
"fieldname": "date",
"hidden": 1,
"width": 150
},
{
"label": _("Supplier"),
"fieldtype": "Link",
"fieldname": "supplier",
"options": "Supplier",
"width": 150
},
{
"label": _("Finished Good Item Code"),
"fieldtype": "Data",
"fieldname": "fg_item_code",
"width": 100
},
{
"label": _("Item name"),
"fieldtype": "Data",
"fieldname": "item_name",
"width": 100
},
{
"label": _("Required Quantity"),
"fieldtype": "Float",
"fieldname": "required_qty",
"width": 100
},
{
"label": _("Received Quantity"),
"fieldtype": "Float",
"fieldname": "received_qty",
"width": 100
},
{
"label": _("Pending Quantity"),
"fieldtype": "Float",
"fieldname": "pending_qty",
"width": 100
}
]
def get_data(data, filters):
po = get_po(filters)
po_name = [v.name for v in po]
sub_items = get_purchase_order_item_supplied(po_name)
for item in sub_items:
for order in po:
if order.name == item.parent and item.received_qty < item.qty:
row ={
'purchase_order': item.parent,
'date': order.transaction_date,
'supplier': order.supplier,
'fg_item_code': item.item_code,
'item_name': item.item_name,
'required_qty': item.qty,
'received_qty':item.received_qty,
'pending_qty':item.qty - item.received_qty
}
data.append(row)
def get_po(filters):
record_filters = [
["is_subcontracted", "=", "Yes"],
["supplier", "=", filters.supplier],
["transaction_date", "<=", filters.to_date],
["transaction_date", ">=", filters.from_date],
["docstatus", "=", 1]
]
return frappe.get_all("Purchase Order", filters=record_filters, fields=["name", "transaction_date", "supplier"])
def get_purchase_order_item_supplied(po):
return frappe.get_all("Purchase Order Item", filters=[
('parent', 'IN', po)
], fields=["parent", "item_code", "item_name", "qty", "received_qty"])
| agpl-3.0 |
wscullin/spack | var/spack/repos/builtin/packages/py-pytz/package.py | 3 | 1982 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPytz(PythonPackage):
"""World timezone definitions, modern and historical."""
homepage = "http://pythonhosted.org/pytz"
url = "https://pypi.io/packages/source/p/pytz/pytz-2016.10.tar.gz"
import_modules = ['pytz']
version('2017.2', 'f89bde8a811c8a1a5bac17eaaa94383c',
url="https://pypi.io/packages/source/p/pytz/pytz-2017.2.zip")
version('2016.10', 'cc9f16ba436efabdcef3c4d32ae4919c')
version('2016.6.1', 'b6c28a3b968bc1d8badfb61b93874e03')
version('2014.10', 'eb1cb941a20c5b751352c52486aa1dd7')
version('2015.4', '417a47b1c432d90333e42084a605d3d8')
version('2016.3', 'abae92c3301b27bd8a9f56b14f52cb29')
depends_on('py-setuptools', type='build')
| lgpl-2.1 |
weidnem/IntroPython2016 | students/baumel/session_07/HTML_lab/test_html_render.py | 3 | 1663 | """
test code for html_render
"""
import io
from html_render import Element
def test_init():
e = Element()
e = Element("this is some text")
def test_content():
    # FIXME: this tests internals!
e = Element("this is some text")
assert "this is some text" in e.content
def test_append():
e = Element("this is some text")
e.append("some more text, wooHoo!")
assert "some more text, wooHoo!" in e.content
def test_two_instances():
e = Element("this is some text")
e2 = Element("this is some text")
e.append("some more text")
assert "some more text" not in e2.content
def test_render():
outfile = io.StringIO()
e = Element("this is some text")
e.append("and this is some more text, WooHoo!!")
e.render(outfile)
outfile.seek(0)
file_contents = outfile.read()
#f = open('test1.html', 'w')
#f.write(file_contents)
open('test1.html', 'w').write(file_contents)
print(file_contents)
assert ("this is some text") in file_contents
assert ("and this is some more text, WooHoo!!") in file_contents
assert file_contents.startswith("<html>")
assert file_contents.strip().endswith("</html>")
def test_tag():
outfile = io.StringIO()
e = Element("this is some text", "body")
e.append("and this is some more text, WooHoo!!")
e.render(outfile)
outfile.seek(0)
file_contents = outfile.read()
#f = open('test1.html', 'w')
#f.write(file_contents)
open('test1.html', 'w').write(file_contents)
print(file_contents)
assert ("this is some text") in file_contents
assert ("and this is some more text, WooHoo!!") in file_contents
assert file_contents.startswith("<body>")
assert file_contents.strip().endswith("</body>")
| unlicense |
mindnervestech/mnrp | addons/sale_crm/wizard/crm_make_sale.py | 74 | 7132 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_make_sale(osv.osv_memory):
""" Make sale order for crm """
_name = "crm.make.sale"
_description = "Make sales"
def _selectPartner(self, cr, uid, context=None):
"""
This function gets default value for partner_id field.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values
@return: default value of partner_id field.
"""
if context is None:
context = {}
lead_obj = self.pool.get('crm.lead')
active_id = context and context.get('active_id', False) or False
if not active_id:
return False
lead = lead_obj.read(cr, uid, [active_id], ['partner_id'], context=context)[0]
return lead['partner_id'][0] if lead['partner_id'] else False
def view_init(self, cr, uid, fields_list, context=None):
return super(crm_make_sale, self).view_init(cr, uid, fields_list, context=context)
def makeOrder(self, cr, uid, ids, context=None):
"""
This function create Quotation on given case.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm make sales' ids
@param context: A standard dictionary for contextual values
@return: Dictionary value of created sales order.
"""
# update context: if come from phonecall, default state values can make the quote crash lp:1017353
context = dict(context or {})
context.pop('default_state', False)
case_obj = self.pool.get('crm.lead')
sale_obj = self.pool.get('sale.order')
partner_obj = self.pool.get('res.partner')
data = context and context.get('active_ids', []) or []
for make in self.browse(cr, uid, ids, context=context):
partner = make.partner_id
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
fpos = partner.property_account_position and partner.property_account_position.id or False
payment_term = partner.property_payment_term and partner.property_payment_term.id or False
new_ids = []
for case in case_obj.browse(cr, uid, data, context=context):
if not partner and case.partner_id:
partner = case.partner_id
fpos = partner.property_account_position and partner.property_account_position.id or False
payment_term = partner.property_payment_term and partner.property_payment_term.id or False
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
if False in partner_addr.values():
raise osv.except_osv(_('Insufficient Data!'), _('No address(es) defined for this customer.'))
vals = {
'origin': _('Opportunity: %s') % str(case.id),
'section_id': case.section_id and case.section_id.id or False,
'categ_ids': [(6, 0, [categ_id.id for categ_id in case.categ_ids])],
'partner_id': partner.id,
'pricelist_id': pricelist,
'partner_invoice_id': partner_addr['invoice'],
'partner_shipping_id': partner_addr['delivery'],
'date_order': fields.date.context_today(self,cr,uid,context=context),
'fiscal_position': fpos,
'payment_term':payment_term,
}
if partner.id:
vals['user_id'] = partner.user_id and partner.user_id.id or uid
new_id = sale_obj.create(cr, uid, vals, context=context)
sale_order = sale_obj.browse(cr, uid, new_id, context=context)
case_obj.write(cr, uid, [case.id], {'ref': 'sale.order,%s' % new_id})
new_ids.append(new_id)
message = _("Opportunity has been <b>converted</b> to the quotation <em>%s</em>.") % (sale_order.name)
case.message_post(body=message)
if make.close:
case_obj.case_mark_won(cr, uid, data, context=context)
if not new_ids:
return {'type': 'ir.actions.act_window_close'}
if len(new_ids)<=1:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'name' : _('Quotation'),
'res_id': new_ids and new_ids[0]
}
else:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'name' : _('Quotation'),
'res_id': new_ids
}
return value
_columns = {
'partner_id': fields.many2one('res.partner', 'Customer', required=True, domain=[('customer','=',True)]),
'close': fields.boolean('Mark Won', help='Check this to close the opportunity after having created the sales order.'),
}
_defaults = {
'close': False,
'partner_id': _selectPartner,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mirkix/ardupilot | Tools/scripts/frame_sizes.py | 351 | 1117 | #!/usr/bin/env python
import re, sys, operator, os
code_line = re.compile(r"^\s*\d+:/")
frame_line = re.compile(r"^\s*\d+\s+/\* frame size = (\d+) \*/")
class frame(object):
def __init__(self, code, frame_size):
self.code = code
self.frame_size = int(frame_size)
frames = []
def process_lst(filename):
'''process one lst file'''
last_code = ''
h = open(filename, mode='r')
for line in h:
if code_line.match(line):
last_code = line.strip()
elif frame_line.match(line):
frames.append(frame(last_code, frame_line.match(line).group(1)))
h.close()
if len(sys.argv) > 1:
dname = sys.argv[1]
else:
dname = '.'
for root, dirs, files in os.walk(dname):
for f in files:
if f.endswith(".lst"):
process_lst(os.path.join(root, f))
sorted_frames = sorted(frames,
key=operator.attrgetter('frame_size'),
reverse=True)
print("FrameSize Code")
for frame in sorted_frames:
if frame.frame_size > 0:
print("%9u %s" % (frame.frame_size, frame.code))
| gpl-3.0 |
antb/TPT----My-old-mod | src/python/stdlib/plat-mac/Carbon/Appearance.py | 81 | 27268 | # Generated from 'Appearance.h'
def FOUR_CHAR_CODE(x): return x
kAppearanceEventClass = FOUR_CHAR_CODE('appr')
kAEAppearanceChanged = FOUR_CHAR_CODE('thme')
kAESystemFontChanged = FOUR_CHAR_CODE('sysf')
kAESmallSystemFontChanged = FOUR_CHAR_CODE('ssfn')
kAEViewsFontChanged = FOUR_CHAR_CODE('vfnt')
kThemeDataFileType = FOUR_CHAR_CODE('thme')
kThemePlatinumFileType = FOUR_CHAR_CODE('pltn')
kThemeCustomThemesFileType = FOUR_CHAR_CODE('scen')
kThemeSoundTrackFileType = FOUR_CHAR_CODE('tsnd')
kThemeBrushDialogBackgroundActive = 1
kThemeBrushDialogBackgroundInactive = 2
kThemeBrushAlertBackgroundActive = 3
kThemeBrushAlertBackgroundInactive = 4
kThemeBrushModelessDialogBackgroundActive = 5
kThemeBrushModelessDialogBackgroundInactive = 6
kThemeBrushUtilityWindowBackgroundActive = 7
kThemeBrushUtilityWindowBackgroundInactive = 8
kThemeBrushListViewSortColumnBackground = 9
kThemeBrushListViewBackground = 10
kThemeBrushIconLabelBackground = 11
kThemeBrushListViewSeparator = 12
kThemeBrushChasingArrows = 13
kThemeBrushDragHilite = 14
kThemeBrushDocumentWindowBackground = 15
kThemeBrushFinderWindowBackground = 16
kThemeBrushScrollBarDelimiterActive = 17
kThemeBrushScrollBarDelimiterInactive = 18
kThemeBrushFocusHighlight = 19
kThemeBrushPopupArrowActive = 20
kThemeBrushPopupArrowPressed = 21
kThemeBrushPopupArrowInactive = 22
kThemeBrushAppleGuideCoachmark = 23
kThemeBrushIconLabelBackgroundSelected = 24
kThemeBrushStaticAreaFill = 25
kThemeBrushActiveAreaFill = 26
kThemeBrushButtonFrameActive = 27
kThemeBrushButtonFrameInactive = 28
kThemeBrushButtonFaceActive = 29
kThemeBrushButtonFaceInactive = 30
kThemeBrushButtonFacePressed = 31
kThemeBrushButtonActiveDarkShadow = 32
kThemeBrushButtonActiveDarkHighlight = 33
kThemeBrushButtonActiveLightShadow = 34
kThemeBrushButtonActiveLightHighlight = 35
kThemeBrushButtonInactiveDarkShadow = 36
kThemeBrushButtonInactiveDarkHighlight = 37
kThemeBrushButtonInactiveLightShadow = 38
kThemeBrushButtonInactiveLightHighlight = 39
kThemeBrushButtonPressedDarkShadow = 40
kThemeBrushButtonPressedDarkHighlight = 41
kThemeBrushButtonPressedLightShadow = 42
kThemeBrushButtonPressedLightHighlight = 43
kThemeBrushBevelActiveLight = 44
kThemeBrushBevelActiveDark = 45
kThemeBrushBevelInactiveLight = 46
kThemeBrushBevelInactiveDark = 47
kThemeBrushNotificationWindowBackground = 48
kThemeBrushMovableModalBackground = 49
kThemeBrushSheetBackgroundOpaque = 50
kThemeBrushDrawerBackground = 51
kThemeBrushToolbarBackground = 52
kThemeBrushSheetBackgroundTransparent = 53
kThemeBrushMenuBackground = 54
kThemeBrushMenuBackgroundSelected = 55
kThemeBrushSheetBackground = kThemeBrushSheetBackgroundOpaque
kThemeBrushBlack = -1
kThemeBrushWhite = -2
kThemeBrushPrimaryHighlightColor = -3
kThemeBrushSecondaryHighlightColor = -4
kThemeTextColorDialogActive = 1
kThemeTextColorDialogInactive = 2
kThemeTextColorAlertActive = 3
kThemeTextColorAlertInactive = 4
kThemeTextColorModelessDialogActive = 5
kThemeTextColorModelessDialogInactive = 6
kThemeTextColorWindowHeaderActive = 7
kThemeTextColorWindowHeaderInactive = 8
kThemeTextColorPlacardActive = 9
kThemeTextColorPlacardInactive = 10
kThemeTextColorPlacardPressed = 11
kThemeTextColorPushButtonActive = 12
kThemeTextColorPushButtonInactive = 13
kThemeTextColorPushButtonPressed = 14
kThemeTextColorBevelButtonActive = 15
kThemeTextColorBevelButtonInactive = 16
kThemeTextColorBevelButtonPressed = 17
kThemeTextColorPopupButtonActive = 18
kThemeTextColorPopupButtonInactive = 19
kThemeTextColorPopupButtonPressed = 20
kThemeTextColorIconLabel = 21
kThemeTextColorListView = 22
kThemeTextColorDocumentWindowTitleActive = 23
kThemeTextColorDocumentWindowTitleInactive = 24
kThemeTextColorMovableModalWindowTitleActive = 25
kThemeTextColorMovableModalWindowTitleInactive = 26
kThemeTextColorUtilityWindowTitleActive = 27
kThemeTextColorUtilityWindowTitleInactive = 28
kThemeTextColorPopupWindowTitleActive = 29
kThemeTextColorPopupWindowTitleInactive = 30
kThemeTextColorRootMenuActive = 31
kThemeTextColorRootMenuSelected = 32
kThemeTextColorRootMenuDisabled = 33
kThemeTextColorMenuItemActive = 34
kThemeTextColorMenuItemSelected = 35
kThemeTextColorMenuItemDisabled = 36
kThemeTextColorPopupLabelActive = 37
kThemeTextColorPopupLabelInactive = 38
kThemeTextColorTabFrontActive = 39
kThemeTextColorTabNonFrontActive = 40
kThemeTextColorTabNonFrontPressed = 41
kThemeTextColorTabFrontInactive = 42
kThemeTextColorTabNonFrontInactive = 43
kThemeTextColorIconLabelSelected = 44
kThemeTextColorBevelButtonStickyActive = 45
kThemeTextColorBevelButtonStickyInactive = 46
kThemeTextColorNotification = 47
kThemeTextColorBlack = -1
kThemeTextColorWhite = -2
kThemeStateInactive = 0
kThemeStateActive = 1
kThemeStatePressed = 2
kThemeStateRollover = 6
kThemeStateUnavailable = 7
kThemeStateUnavailableInactive = 8
kThemeStateDisabled = 0
kThemeStatePressedUp = 2
kThemeStatePressedDown = 3
kThemeArrowCursor = 0
kThemeCopyArrowCursor = 1
kThemeAliasArrowCursor = 2
kThemeContextualMenuArrowCursor = 3
kThemeIBeamCursor = 4
kThemeCrossCursor = 5
kThemePlusCursor = 6
kThemeWatchCursor = 7
kThemeClosedHandCursor = 8
kThemeOpenHandCursor = 9
kThemePointingHandCursor = 10
kThemeCountingUpHandCursor = 11
kThemeCountingDownHandCursor = 12
kThemeCountingUpAndDownHandCursor = 13
kThemeSpinningCursor = 14
kThemeResizeLeftCursor = 15
kThemeResizeRightCursor = 16
kThemeResizeLeftRightCursor = 17
kThemeMenuBarNormal = 0
kThemeMenuBarSelected = 1
kThemeMenuSquareMenuBar = (1 << 0)
kThemeMenuActive = 0
kThemeMenuSelected = 1
kThemeMenuDisabled = 3
kThemeMenuTypePullDown = 0
kThemeMenuTypePopUp = 1
kThemeMenuTypeHierarchical = 2
kThemeMenuTypeInactive = 0x0100
kThemeMenuItemPlain = 0
kThemeMenuItemHierarchical = 1
kThemeMenuItemScrollUpArrow = 2
kThemeMenuItemScrollDownArrow = 3
kThemeMenuItemAtTop = 0x0100
kThemeMenuItemAtBottom = 0x0200
kThemeMenuItemHierBackground = 0x0400
kThemeMenuItemPopUpBackground = 0x0800
kThemeMenuItemHasIcon = 0x8000
kThemeMenuItemNoBackground = 0x4000
kThemeBackgroundTabPane = 1
kThemeBackgroundPlacard = 2
kThemeBackgroundWindowHeader = 3
kThemeBackgroundListViewWindowHeader = 4
kThemeBackgroundSecondaryGroupBox = 5
kThemeNameTag = FOUR_CHAR_CODE('name')
kThemeVariantNameTag = FOUR_CHAR_CODE('varn')
kThemeVariantBaseTintTag = FOUR_CHAR_CODE('tint')
kThemeHighlightColorTag = FOUR_CHAR_CODE('hcol')
kThemeScrollBarArrowStyleTag = FOUR_CHAR_CODE('sbar')
kThemeScrollBarThumbStyleTag = FOUR_CHAR_CODE('sbth')
kThemeSoundsEnabledTag = FOUR_CHAR_CODE('snds')
kThemeDblClickCollapseTag = FOUR_CHAR_CODE('coll')
kThemeAppearanceFileNameTag = FOUR_CHAR_CODE('thme')
kThemeSystemFontTag = FOUR_CHAR_CODE('lgsf')
kThemeSmallSystemFontTag = FOUR_CHAR_CODE('smsf')
kThemeViewsFontTag = FOUR_CHAR_CODE('vfnt')
kThemeViewsFontSizeTag = FOUR_CHAR_CODE('vfsz')
kThemeDesktopPatternNameTag = FOUR_CHAR_CODE('patn')
kThemeDesktopPatternTag = FOUR_CHAR_CODE('patt')
kThemeDesktopPictureNameTag = FOUR_CHAR_CODE('dpnm')
kThemeDesktopPictureAliasTag = FOUR_CHAR_CODE('dpal')
kThemeDesktopPictureAlignmentTag = FOUR_CHAR_CODE('dpan')
kThemeHighlightColorNameTag = FOUR_CHAR_CODE('hcnm')
kThemeExamplePictureIDTag = FOUR_CHAR_CODE('epic')
kThemeSoundTrackNameTag = FOUR_CHAR_CODE('sndt')
kThemeSoundMaskTag = FOUR_CHAR_CODE('smsk')
kThemeUserDefinedTag = FOUR_CHAR_CODE('user')
kThemeSmoothFontEnabledTag = FOUR_CHAR_CODE('smoo')
kThemeSmoothFontMinSizeTag = FOUR_CHAR_CODE('smos')
kTiledOnScreen = 1
kCenterOnScreen = 2
kFitToScreen = 3
kFillScreen = 4
kUseBestGuess = 5
kThemeCheckBoxClassicX = 0
kThemeCheckBoxCheckMark = 1
kThemeScrollBarArrowsSingle = 0
kThemeScrollBarArrowsLowerRight = 1
kThemeScrollBarThumbNormal = 0
kThemeScrollBarThumbProportional = 1
kThemeSystemFont = 0
kThemeSmallSystemFont = 1
kThemeSmallEmphasizedSystemFont = 2
kThemeViewsFont = 3
kThemeEmphasizedSystemFont = 4
kThemeApplicationFont = 5
kThemeLabelFont = 6
kThemeMenuTitleFont = 100
kThemeMenuItemFont = 101
kThemeMenuItemMarkFont = 102
kThemeMenuItemCmdKeyFont = 103
kThemeWindowTitleFont = 104
kThemePushButtonFont = 105
kThemeUtilityWindowTitleFont = 106
kThemeAlertHeaderFont = 107
kThemeCurrentPortFont = 200
kThemeTabNonFront = 0
kThemeTabNonFrontPressed = 1
kThemeTabNonFrontInactive = 2
kThemeTabFront = 3
kThemeTabFrontInactive = 4
kThemeTabNonFrontUnavailable = 5
kThemeTabFrontUnavailable = 6
kThemeTabNorth = 0
kThemeTabSouth = 1
kThemeTabEast = 2
kThemeTabWest = 3
kThemeSmallTabHeight = 16
kThemeLargeTabHeight = 21
kThemeTabPaneOverlap = 3
kThemeSmallTabHeightMax = 19
kThemeLargeTabHeightMax = 24
kThemeMediumScrollBar = 0
kThemeSmallScrollBar = 1
kThemeMediumSlider = 2
kThemeMediumProgressBar = 3
kThemeMediumIndeterminateBar = 4
kThemeRelevanceBar = 5
kThemeSmallSlider = 6
kThemeLargeProgressBar = 7
kThemeLargeIndeterminateBar = 8
kThemeTrackActive = 0
kThemeTrackDisabled = 1
kThemeTrackNothingToScroll = 2
kThemeTrackInactive = 3
kThemeLeftOutsideArrowPressed = 0x01
kThemeLeftInsideArrowPressed = 0x02
kThemeLeftTrackPressed = 0x04
kThemeThumbPressed = 0x08
kThemeRightTrackPressed = 0x10
kThemeRightInsideArrowPressed = 0x20
kThemeRightOutsideArrowPressed = 0x40
kThemeTopOutsideArrowPressed = kThemeLeftOutsideArrowPressed
kThemeTopInsideArrowPressed = kThemeLeftInsideArrowPressed
kThemeTopTrackPressed = kThemeLeftTrackPressed
kThemeBottomTrackPressed = kThemeRightTrackPressed
kThemeBottomInsideArrowPressed = kThemeRightInsideArrowPressed
kThemeBottomOutsideArrowPressed = kThemeRightOutsideArrowPressed
kThemeThumbPlain = 0
kThemeThumbUpward = 1
kThemeThumbDownward = 2
kThemeTrackHorizontal = (1 << 0)
kThemeTrackRightToLeft = (1 << 1)
kThemeTrackShowThumb = (1 << 2)
kThemeTrackThumbRgnIsNotGhost = (1 << 3)
kThemeTrackNoScrollBarArrows = (1 << 4)
kThemeWindowHasGrow = (1 << 0)
kThemeWindowHasHorizontalZoom = (1 << 3)
kThemeWindowHasVerticalZoom = (1 << 4)
kThemeWindowHasFullZoom = kThemeWindowHasHorizontalZoom + kThemeWindowHasVerticalZoom
kThemeWindowHasCloseBox = (1 << 5)
kThemeWindowHasCollapseBox = (1 << 6)
kThemeWindowHasTitleText = (1 << 7)
kThemeWindowIsCollapsed = (1 << 8)
kThemeWindowHasDirty = (1 << 9)
kThemeDocumentWindow = 0
kThemeDialogWindow = 1
kThemeMovableDialogWindow = 2
kThemeAlertWindow = 3
kThemeMovableAlertWindow = 4
kThemePlainDialogWindow = 5
kThemeShadowDialogWindow = 6
kThemePopupWindow = 7
kThemeUtilityWindow = 8
kThemeUtilitySideWindow = 9
kThemeSheetWindow = 10
kThemeDrawerWindow = 11
kThemeWidgetCloseBox = 0
kThemeWidgetZoomBox = 1
kThemeWidgetCollapseBox = 2
kThemeWidgetDirtyCloseBox = 6
kThemeArrowLeft = 0
kThemeArrowDown = 1
kThemeArrowRight = 2
kThemeArrowUp = 3
kThemeArrow3pt = 0
kThemeArrow5pt = 1
kThemeArrow7pt = 2
kThemeArrow9pt = 3
kThemeGrowLeft = (1 << 0)
kThemeGrowRight = (1 << 1)
kThemeGrowUp = (1 << 2)
kThemeGrowDown = (1 << 3)
kThemePushButton = 0
kThemeCheckBox = 1
kThemeRadioButton = 2
kThemeBevelButton = 3
kThemeArrowButton = 4
kThemePopupButton = 5
kThemeDisclosureButton = 6
kThemeIncDecButton = 7
kThemeSmallBevelButton = 8
kThemeMediumBevelButton = 3
kThemeLargeBevelButton = 9
kThemeListHeaderButton = 10
kThemeRoundButton = 11
kThemeLargeRoundButton = 12
kThemeSmallCheckBox = 13
kThemeSmallRadioButton = 14
kThemeRoundedBevelButton = 15
kThemeNormalCheckBox = kThemeCheckBox
kThemeNormalRadioButton = kThemeRadioButton
kThemeButtonOff = 0
kThemeButtonOn = 1
kThemeButtonMixed = 2
kThemeDisclosureRight = 0
kThemeDisclosureDown = 1
kThemeDisclosureLeft = 2
kThemeAdornmentNone = 0
kThemeAdornmentDefault = (1 << 0)
kThemeAdornmentFocus = (1 << 2)
kThemeAdornmentRightToLeft = (1 << 4)
kThemeAdornmentDrawIndicatorOnly = (1 << 5)
kThemeAdornmentHeaderButtonLeftNeighborSelected = (1 << 6)
kThemeAdornmentHeaderButtonRightNeighborSelected = (1 << 7)
kThemeAdornmentHeaderButtonSortUp = (1 << 8)
kThemeAdornmentHeaderMenuButton = (1 << 9)
kThemeAdornmentHeaderButtonNoShadow = (1 << 10)
kThemeAdornmentHeaderButtonShadowOnly = (1 << 11)
kThemeAdornmentNoShadow = kThemeAdornmentHeaderButtonNoShadow
kThemeAdornmentShadowOnly = kThemeAdornmentHeaderButtonShadowOnly
kThemeAdornmentArrowLeftArrow = (1 << 6)
kThemeAdornmentArrowDownArrow = (1 << 7)
kThemeAdornmentArrowDoubleArrow = (1 << 8)
kThemeAdornmentArrowUpArrow = (1 << 9)
kThemeNoSounds = 0
kThemeWindowSoundsMask = (1 << 0)
kThemeMenuSoundsMask = (1 << 1)
kThemeControlSoundsMask = (1 << 2)
kThemeFinderSoundsMask = (1 << 3)
kThemeDragSoundNone = 0
kThemeDragSoundMoveWindow = FOUR_CHAR_CODE('wmov')
kThemeDragSoundGrowWindow = FOUR_CHAR_CODE('wgro')
kThemeDragSoundMoveUtilWindow = FOUR_CHAR_CODE('umov')
kThemeDragSoundGrowUtilWindow = FOUR_CHAR_CODE('ugro')
kThemeDragSoundMoveDialog = FOUR_CHAR_CODE('dmov')
kThemeDragSoundMoveAlert = FOUR_CHAR_CODE('amov')
kThemeDragSoundMoveIcon = FOUR_CHAR_CODE('imov')
kThemeDragSoundSliderThumb = FOUR_CHAR_CODE('slth')
kThemeDragSoundSliderGhost = FOUR_CHAR_CODE('slgh')
kThemeDragSoundScrollBarThumb = FOUR_CHAR_CODE('sbth')
kThemeDragSoundScrollBarGhost = FOUR_CHAR_CODE('sbgh')
kThemeDragSoundScrollBarArrowDecreasing = FOUR_CHAR_CODE('sbad')
kThemeDragSoundScrollBarArrowIncreasing = FOUR_CHAR_CODE('sbai')
kThemeDragSoundDragging = FOUR_CHAR_CODE('drag')
kThemeSoundNone = 0
kThemeSoundMenuOpen = FOUR_CHAR_CODE('mnuo')
kThemeSoundMenuClose = FOUR_CHAR_CODE('mnuc')
kThemeSoundMenuItemHilite = FOUR_CHAR_CODE('mnui')
kThemeSoundMenuItemRelease = FOUR_CHAR_CODE('mnus')
kThemeSoundWindowClosePress = FOUR_CHAR_CODE('wclp')
kThemeSoundWindowCloseEnter = FOUR_CHAR_CODE('wcle')
kThemeSoundWindowCloseExit = FOUR_CHAR_CODE('wclx')
kThemeSoundWindowCloseRelease = FOUR_CHAR_CODE('wclr')
kThemeSoundWindowZoomPress = FOUR_CHAR_CODE('wzmp')
kThemeSoundWindowZoomEnter = FOUR_CHAR_CODE('wzme')
kThemeSoundWindowZoomExit = FOUR_CHAR_CODE('wzmx')
kThemeSoundWindowZoomRelease = FOUR_CHAR_CODE('wzmr')
kThemeSoundWindowCollapsePress = FOUR_CHAR_CODE('wcop')
kThemeSoundWindowCollapseEnter = FOUR_CHAR_CODE('wcoe')
kThemeSoundWindowCollapseExit = FOUR_CHAR_CODE('wcox')
kThemeSoundWindowCollapseRelease = FOUR_CHAR_CODE('wcor')
kThemeSoundWindowDragBoundary = FOUR_CHAR_CODE('wdbd')
kThemeSoundUtilWinClosePress = FOUR_CHAR_CODE('uclp')
kThemeSoundUtilWinCloseEnter = FOUR_CHAR_CODE('ucle')
kThemeSoundUtilWinCloseExit = FOUR_CHAR_CODE('uclx')
kThemeSoundUtilWinCloseRelease = FOUR_CHAR_CODE('uclr')
kThemeSoundUtilWinZoomPress = FOUR_CHAR_CODE('uzmp')
kThemeSoundUtilWinZoomEnter = FOUR_CHAR_CODE('uzme')
kThemeSoundUtilWinZoomExit = FOUR_CHAR_CODE('uzmx')
kThemeSoundUtilWinZoomRelease = FOUR_CHAR_CODE('uzmr')
kThemeSoundUtilWinCollapsePress = FOUR_CHAR_CODE('ucop')
kThemeSoundUtilWinCollapseEnter = FOUR_CHAR_CODE('ucoe')
kThemeSoundUtilWinCollapseExit = FOUR_CHAR_CODE('ucox')
kThemeSoundUtilWinCollapseRelease = FOUR_CHAR_CODE('ucor')
kThemeSoundUtilWinDragBoundary = FOUR_CHAR_CODE('udbd')
kThemeSoundWindowOpen = FOUR_CHAR_CODE('wopn')
kThemeSoundWindowClose = FOUR_CHAR_CODE('wcls')
kThemeSoundWindowZoomIn = FOUR_CHAR_CODE('wzmi')
kThemeSoundWindowZoomOut = FOUR_CHAR_CODE('wzmo')
kThemeSoundWindowCollapseUp = FOUR_CHAR_CODE('wcol')
kThemeSoundWindowCollapseDown = FOUR_CHAR_CODE('wexp')
kThemeSoundWindowActivate = FOUR_CHAR_CODE('wact')
kThemeSoundUtilWindowOpen = FOUR_CHAR_CODE('uopn')
kThemeSoundUtilWindowClose = FOUR_CHAR_CODE('ucls')
kThemeSoundUtilWindowZoomIn = FOUR_CHAR_CODE('uzmi')
kThemeSoundUtilWindowZoomOut = FOUR_CHAR_CODE('uzmo')
kThemeSoundUtilWindowCollapseUp = FOUR_CHAR_CODE('ucol')
kThemeSoundUtilWindowCollapseDown = FOUR_CHAR_CODE('uexp')
kThemeSoundUtilWindowActivate = FOUR_CHAR_CODE('uact')
kThemeSoundDialogOpen = FOUR_CHAR_CODE('dopn')
kThemeSoundDialogClose = FOUR_CHAR_CODE('dlgc')
kThemeSoundAlertOpen = FOUR_CHAR_CODE('aopn')
kThemeSoundAlertClose = FOUR_CHAR_CODE('altc')
kThemeSoundPopupWindowOpen = FOUR_CHAR_CODE('pwop')
kThemeSoundPopupWindowClose = FOUR_CHAR_CODE('pwcl')
kThemeSoundButtonPress = FOUR_CHAR_CODE('btnp')
kThemeSoundButtonEnter = FOUR_CHAR_CODE('btne')
kThemeSoundButtonExit = FOUR_CHAR_CODE('btnx')
kThemeSoundButtonRelease = FOUR_CHAR_CODE('btnr')
kThemeSoundDefaultButtonPress = FOUR_CHAR_CODE('dbtp')
kThemeSoundDefaultButtonEnter = FOUR_CHAR_CODE('dbte')
kThemeSoundDefaultButtonExit = FOUR_CHAR_CODE('dbtx')
kThemeSoundDefaultButtonRelease = FOUR_CHAR_CODE('dbtr')
kThemeSoundCancelButtonPress = FOUR_CHAR_CODE('cbtp')
kThemeSoundCancelButtonEnter = FOUR_CHAR_CODE('cbte')
kThemeSoundCancelButtonExit = FOUR_CHAR_CODE('cbtx')
kThemeSoundCancelButtonRelease = FOUR_CHAR_CODE('cbtr')
kThemeSoundCheckboxPress = FOUR_CHAR_CODE('chkp')
kThemeSoundCheckboxEnter = FOUR_CHAR_CODE('chke')
kThemeSoundCheckboxExit = FOUR_CHAR_CODE('chkx')
kThemeSoundCheckboxRelease = FOUR_CHAR_CODE('chkr')
kThemeSoundRadioPress = FOUR_CHAR_CODE('radp')
kThemeSoundRadioEnter = FOUR_CHAR_CODE('rade')
kThemeSoundRadioExit = FOUR_CHAR_CODE('radx')
kThemeSoundRadioRelease = FOUR_CHAR_CODE('radr')
kThemeSoundScrollArrowPress = FOUR_CHAR_CODE('sbap')
kThemeSoundScrollArrowEnter = FOUR_CHAR_CODE('sbae')
kThemeSoundScrollArrowExit = FOUR_CHAR_CODE('sbax')
kThemeSoundScrollArrowRelease = FOUR_CHAR_CODE('sbar')
kThemeSoundScrollEndOfTrack = FOUR_CHAR_CODE('sbte')
kThemeSoundScrollTrackPress = FOUR_CHAR_CODE('sbtp')
kThemeSoundSliderEndOfTrack = FOUR_CHAR_CODE('slte')
kThemeSoundSliderTrackPress = FOUR_CHAR_CODE('sltp')
kThemeSoundBalloonOpen = FOUR_CHAR_CODE('blno')
kThemeSoundBalloonClose = FOUR_CHAR_CODE('blnc')
kThemeSoundBevelPress = FOUR_CHAR_CODE('bevp')
kThemeSoundBevelEnter = FOUR_CHAR_CODE('beve')
kThemeSoundBevelExit = FOUR_CHAR_CODE('bevx')
kThemeSoundBevelRelease = FOUR_CHAR_CODE('bevr')
kThemeSoundLittleArrowUpPress = FOUR_CHAR_CODE('laup')
kThemeSoundLittleArrowDnPress = FOUR_CHAR_CODE('ladp')
kThemeSoundLittleArrowEnter = FOUR_CHAR_CODE('lare')
kThemeSoundLittleArrowExit = FOUR_CHAR_CODE('larx')
kThemeSoundLittleArrowUpRelease = FOUR_CHAR_CODE('laur')
kThemeSoundLittleArrowDnRelease = FOUR_CHAR_CODE('ladr')
kThemeSoundPopupPress = FOUR_CHAR_CODE('popp')
kThemeSoundPopupEnter = FOUR_CHAR_CODE('pope')
kThemeSoundPopupExit = FOUR_CHAR_CODE('popx')
kThemeSoundPopupRelease = FOUR_CHAR_CODE('popr')
kThemeSoundDisclosurePress = FOUR_CHAR_CODE('dscp')
kThemeSoundDisclosureEnter = FOUR_CHAR_CODE('dsce')
kThemeSoundDisclosureExit = FOUR_CHAR_CODE('dscx')
kThemeSoundDisclosureRelease = FOUR_CHAR_CODE('dscr')
kThemeSoundTabPressed = FOUR_CHAR_CODE('tabp')
kThemeSoundTabEnter = FOUR_CHAR_CODE('tabe')
kThemeSoundTabExit = FOUR_CHAR_CODE('tabx')
kThemeSoundTabRelease = FOUR_CHAR_CODE('tabr')
kThemeSoundDragTargetHilite = FOUR_CHAR_CODE('dthi')
kThemeSoundDragTargetUnhilite = FOUR_CHAR_CODE('dtuh')
kThemeSoundDragTargetDrop = FOUR_CHAR_CODE('dtdr')
kThemeSoundEmptyTrash = FOUR_CHAR_CODE('ftrs')
kThemeSoundSelectItem = FOUR_CHAR_CODE('fsel')
kThemeSoundNewItem = FOUR_CHAR_CODE('fnew')
kThemeSoundReceiveDrop = FOUR_CHAR_CODE('fdrp')
kThemeSoundCopyDone = FOUR_CHAR_CODE('fcpd')
kThemeSoundResolveAlias = FOUR_CHAR_CODE('fral')
kThemeSoundLaunchApp = FOUR_CHAR_CODE('flap')
kThemeSoundDiskInsert = FOUR_CHAR_CODE('dski')
kThemeSoundDiskEject = FOUR_CHAR_CODE('dske')
kThemeSoundFinderDragOnIcon = FOUR_CHAR_CODE('fdon')
kThemeSoundFinderDragOffIcon = FOUR_CHAR_CODE('fdof')
kThemePopupTabNormalPosition = 0
kThemePopupTabCenterOnWindow = 1
kThemePopupTabCenterOnOffset = 2
kThemeMetricScrollBarWidth = 0
kThemeMetricSmallScrollBarWidth = 1
kThemeMetricCheckBoxHeight = 2
kThemeMetricRadioButtonHeight = 3
kThemeMetricEditTextWhitespace = 4
kThemeMetricEditTextFrameOutset = 5
kThemeMetricListBoxFrameOutset = 6
kThemeMetricFocusRectOutset = 7
kThemeMetricImageWellThickness = 8
kThemeMetricScrollBarOverlap = 9
kThemeMetricLargeTabHeight = 10
kThemeMetricLargeTabCapsWidth = 11
kThemeMetricTabFrameOverlap = 12
kThemeMetricTabIndentOrStyle = 13
kThemeMetricTabOverlap = 14
kThemeMetricSmallTabHeight = 15
kThemeMetricSmallTabCapsWidth = 16
kThemeMetricDisclosureButtonHeight = 17
kThemeMetricRoundButtonSize = 18
kThemeMetricPushButtonHeight = 19
kThemeMetricListHeaderHeight = 20
kThemeMetricSmallCheckBoxHeight = 21
kThemeMetricDisclosureButtonWidth = 22
kThemeMetricSmallDisclosureButtonHeight = 23
kThemeMetricSmallDisclosureButtonWidth = 24
kThemeMetricDisclosureTriangleHeight = 25
kThemeMetricDisclosureTriangleWidth = 26
kThemeMetricLittleArrowsHeight = 27
kThemeMetricLittleArrowsWidth = 28
kThemeMetricPaneSplitterHeight = 29
kThemeMetricPopupButtonHeight = 30
kThemeMetricSmallPopupButtonHeight = 31
kThemeMetricLargeProgressBarThickness = 32
kThemeMetricPullDownHeight = 33
kThemeMetricSmallPullDownHeight = 34
kThemeMetricSmallPushButtonHeight = 35
kThemeMetricSmallRadioButtonHeight = 36
kThemeMetricRelevanceIndicatorHeight = 37
kThemeMetricResizeControlHeight = 38
kThemeMetricSmallResizeControlHeight = 39
kThemeMetricLargeRoundButtonSize = 40
kThemeMetricHSliderHeight = 41
kThemeMetricHSliderTickHeight = 42
kThemeMetricSmallHSliderHeight = 43
kThemeMetricSmallHSliderTickHeight = 44
kThemeMetricVSliderWidth = 45
kThemeMetricVSliderTickWidth = 46
kThemeMetricSmallVSliderWidth = 47
kThemeMetricSmallVSliderTickWidth = 48
kThemeMetricTitleBarControlsHeight = 49
kThemeMetricCheckBoxWidth = 50
kThemeMetricSmallCheckBoxWidth = 51
kThemeMetricRadioButtonWidth = 52
kThemeMetricSmallRadioButtonWidth = 53
kThemeMetricSmallHSliderMinThumbWidth = 54
kThemeMetricSmallVSliderMinThumbHeight = 55
kThemeMetricSmallHSliderTickOffset = 56
kThemeMetricSmallVSliderTickOffset = 57
kThemeMetricNormalProgressBarThickness = 58
kThemeMetricProgressBarShadowOutset = 59
kThemeMetricSmallProgressBarShadowOutset = 60
kThemeMetricPrimaryGroupBoxContentInset = 61
kThemeMetricSecondaryGroupBoxContentInset = 62
kThemeMetricMenuMarkColumnWidth = 63
kThemeMetricMenuExcludedMarkColumnWidth = 64
kThemeMetricMenuMarkIndent = 65
kThemeMetricMenuTextLeadingEdgeMargin = 66
kThemeMetricMenuTextTrailingEdgeMargin = 67
kThemeMetricMenuIndentWidth = 68
kThemeMetricMenuIconTrailingEdgeMargin = 69
# appearanceBadBrushIndexErr = themeInvalidBrushErr
# appearanceProcessRegisteredErr = themeProcessRegisteredErr
# appearanceProcessNotRegisteredErr = themeProcessNotRegisteredErr
# appearanceBadTextColorIndexErr = themeBadTextColorErr
# appearanceThemeHasNoAccents = themeHasNoAccentsErr
# appearanceBadCursorIndexErr = themeBadCursorIndexErr
kThemeActiveDialogBackgroundBrush = kThemeBrushDialogBackgroundActive
kThemeInactiveDialogBackgroundBrush = kThemeBrushDialogBackgroundInactive
kThemeActiveAlertBackgroundBrush = kThemeBrushAlertBackgroundActive
kThemeInactiveAlertBackgroundBrush = kThemeBrushAlertBackgroundInactive
kThemeActiveModelessDialogBackgroundBrush = kThemeBrushModelessDialogBackgroundActive
kThemeInactiveModelessDialogBackgroundBrush = kThemeBrushModelessDialogBackgroundInactive
kThemeActiveUtilityWindowBackgroundBrush = kThemeBrushUtilityWindowBackgroundActive
kThemeInactiveUtilityWindowBackgroundBrush = kThemeBrushUtilityWindowBackgroundInactive
kThemeListViewSortColumnBackgroundBrush = kThemeBrushListViewSortColumnBackground
kThemeListViewBackgroundBrush = kThemeBrushListViewBackground
kThemeIconLabelBackgroundBrush = kThemeBrushIconLabelBackground
kThemeListViewSeparatorBrush = kThemeBrushListViewSeparator
kThemeChasingArrowsBrush = kThemeBrushChasingArrows
kThemeDragHiliteBrush = kThemeBrushDragHilite
kThemeDocumentWindowBackgroundBrush = kThemeBrushDocumentWindowBackground
kThemeFinderWindowBackgroundBrush = kThemeBrushFinderWindowBackground
kThemeActiveScrollBarDelimiterBrush = kThemeBrushScrollBarDelimiterActive
kThemeInactiveScrollBarDelimiterBrush = kThemeBrushScrollBarDelimiterInactive
kThemeFocusHighlightBrush = kThemeBrushFocusHighlight
kThemeActivePopupArrowBrush = kThemeBrushPopupArrowActive
kThemePressedPopupArrowBrush = kThemeBrushPopupArrowPressed
kThemeInactivePopupArrowBrush = kThemeBrushPopupArrowInactive
kThemeAppleGuideCoachmarkBrush = kThemeBrushAppleGuideCoachmark
kThemeActiveDialogTextColor = kThemeTextColorDialogActive
kThemeInactiveDialogTextColor = kThemeTextColorDialogInactive
kThemeActiveAlertTextColor = kThemeTextColorAlertActive
kThemeInactiveAlertTextColor = kThemeTextColorAlertInactive
kThemeActiveModelessDialogTextColor = kThemeTextColorModelessDialogActive
kThemeInactiveModelessDialogTextColor = kThemeTextColorModelessDialogInactive
kThemeActiveWindowHeaderTextColor = kThemeTextColorWindowHeaderActive
kThemeInactiveWindowHeaderTextColor = kThemeTextColorWindowHeaderInactive
kThemeActivePlacardTextColor = kThemeTextColorPlacardActive
kThemeInactivePlacardTextColor = kThemeTextColorPlacardInactive
kThemePressedPlacardTextColor = kThemeTextColorPlacardPressed
kThemeActivePushButtonTextColor = kThemeTextColorPushButtonActive
kThemeInactivePushButtonTextColor = kThemeTextColorPushButtonInactive
kThemePressedPushButtonTextColor = kThemeTextColorPushButtonPressed
kThemeActiveBevelButtonTextColor = kThemeTextColorBevelButtonActive
kThemeInactiveBevelButtonTextColor = kThemeTextColorBevelButtonInactive
kThemePressedBevelButtonTextColor = kThemeTextColorBevelButtonPressed
kThemeActivePopupButtonTextColor = kThemeTextColorPopupButtonActive
kThemeInactivePopupButtonTextColor = kThemeTextColorPopupButtonInactive
kThemePressedPopupButtonTextColor = kThemeTextColorPopupButtonPressed
kThemeIconLabelTextColor = kThemeTextColorIconLabel
kThemeListViewTextColor = kThemeTextColorListView
kThemeActiveDocumentWindowTitleTextColor = kThemeTextColorDocumentWindowTitleActive
kThemeInactiveDocumentWindowTitleTextColor = kThemeTextColorDocumentWindowTitleInactive
kThemeActiveMovableModalWindowTitleTextColor = kThemeTextColorMovableModalWindowTitleActive
kThemeInactiveMovableModalWindowTitleTextColor = kThemeTextColorMovableModalWindowTitleInactive
kThemeActiveUtilityWindowTitleTextColor = kThemeTextColorUtilityWindowTitleActive
kThemeInactiveUtilityWindowTitleTextColor = kThemeTextColorUtilityWindowTitleInactive
kThemeActivePopupWindowTitleColor = kThemeTextColorPopupWindowTitleActive
kThemeInactivePopupWindowTitleColor = kThemeTextColorPopupWindowTitleInactive
kThemeActiveRootMenuTextColor = kThemeTextColorRootMenuActive
kThemeSelectedRootMenuTextColor = kThemeTextColorRootMenuSelected
kThemeDisabledRootMenuTextColor = kThemeTextColorRootMenuDisabled
kThemeActiveMenuItemTextColor = kThemeTextColorMenuItemActive
kThemeSelectedMenuItemTextColor = kThemeTextColorMenuItemSelected
kThemeDisabledMenuItemTextColor = kThemeTextColorMenuItemDisabled
kThemeActivePopupLabelTextColor = kThemeTextColorPopupLabelActive
kThemeInactivePopupLabelTextColor = kThemeTextColorPopupLabelInactive
kAEThemeSwitch = kAEAppearanceChanged
kThemeNoAdornment = kThemeAdornmentNone
kThemeDefaultAdornment = kThemeAdornmentDefault
kThemeFocusAdornment = kThemeAdornmentFocus
kThemeRightToLeftAdornment = kThemeAdornmentRightToLeft
kThemeDrawIndicatorOnly = kThemeAdornmentDrawIndicatorOnly
kThemeBrushPassiveAreaFill = kThemeBrushStaticAreaFill
kThemeMetricCheckBoxGlyphHeight = kThemeMetricCheckBoxHeight
kThemeMetricRadioButtonGlyphHeight = kThemeMetricRadioButtonHeight
kThemeMetricDisclosureButtonSize = kThemeMetricDisclosureButtonHeight
kThemeMetricBestListHeaderHeight = kThemeMetricListHeaderHeight
kThemeMetricSmallProgressBarThickness = kThemeMetricNormalProgressBarThickness
kThemeMetricProgressBarThickness = kThemeMetricLargeProgressBarThickness
kThemeScrollBar = kThemeMediumScrollBar
kThemeSlider = kThemeMediumSlider
kThemeProgressBar = kThemeMediumProgressBar
kThemeIndeterminateBar = kThemeMediumIndeterminateBar
| gpl-2.0 |
alihalabyah/flexx | flexx/pyscript/parser3.py | 21 | 22006 | """
Python Builtins
---------------
Most builtin functions (that make sense in JS) are automatically
translated to JavaScript: isinstance, issubclass, callable, hasattr,
getattr, setattr, delattr, print, len, max, min, chr, ord, dict, list,
tuple, range, pow, sum, round, int, float, str, bool, abs, divmod, all,
any, enumerate, zip, reversed, sorted, filter, map.
.. pyscript_example::
# "self" is replaced with "this"
self.foo
# Printing just works
print('some test')
print(a, b, c, sep='-')
# Getting the length of a string or array
len(foo)
# Rounding and abs
round(foo) # round to nearest integer
int(foo) # round towards 0 as in Python
abs(foo)
# min and max
min(foo)
min(a, b, c)
max(foo)
max(a, b, c)
# divmod
a, b = divmod(100, 7) # -> 14, 2
# Aggregation
sum(foo)
all(foo)
any(foo)
# Turning things into numbers, bools and strings
str(s)
float(x)
bool(y)
int(z) # this rounds towards zero like in Python
chr(65) # -> 'A'
ord('A') # -> 65
# Turning things into lists and dicts
dict([['foo', 1], ['bar', 2]]) # -> {'foo': 1, 'bar': 2}
list('abc') # -> ['a', 'b', 'c']
dict(other_dict) # make a copy
list(other_list) # make copy
The isinstance function (and friends)
-------------------------------------
The ``isinstance()`` function works for all JS primitive types, but also
for user-defined classes.
.. pyscript_example::
# Basic types
isinstance(3, float) # in JS there are no ints
isinstance('', str)
isinstance([], list)
isinstance({}, dict)
isinstance(foo, types.FunctionType)
# Can also use JS strings
isinstance(3, 'number')
isinstance('', 'string')
isinstance([], 'array')
isinstance({}, 'object')
isinstance(foo, 'function')
# You can use it on your own types too ...
isinstance(x, MyClass)
isinstance(x, 'MyClass') # equivalent
isinstance(x, 'Object') # also yields true (subclass of Object)
# issubclass works too
issubclass(Foo, Bar)
# As well as callable
callable(foo)
hasattr, getattr, setattr and delattr
-------------------------------------
.. pyscript_example::
a = {'foo': 1, 'bar': 2}
hasattr(a, 'foo') # -> True
hasattr(a, 'fooo') # -> False
hasattr(null, 'foo') # -> False
getattr(a, 'foo') # -> 1
getattr(a, 'fooo') # -> raise AttributeError
getattr(a, 'fooo', 3) # -> 3
getattr(null, 'foo', 3) # -> 3
setattr(a, 'foo', 2)
delattr(a, 'foo')
Creating sequences
------------------
.. pyscript_example::
range(10)
range(2, 10, 2)
range(100, 0, -1)
reversed(foo)
sorted(foo)
enumerate(foo)
zip(foo, bar)
filter(func, foo)
map(func, foo)
List methods
------------
.. pyscript_example::
# Call a.append() if it exists, otherwise a.push()
a.append(x)
# Similar for remove()
a.remove(x)
Dict methods
------------
.. pyscript_example::
a = {'foo': 3}
a['foo']
a.get('foo', 0)
a.get('foo')
a.keys()
Str methods
-----------
.. pyscript_example::
"foobar".startswith('foo')
Additional sugar
----------------
.. pyscript_example::
# Get time (number of seconds since epoch)
print(time.time())
# High resolution timer (as in time.perf_counter on Python 3)
t0 = time.perf_counter()
do_something()
t1 = time.perf_counter()
print('this took me', t1-t0, 'seconds')
"""
import ast
from .parser2 import Parser2, JSError, unify # noqa
# List of possibly relevant builtin functions:
#
# abs all any bin bool callable chr complex delattr dict dir divmod
# enumerate eval exec filter float format getattr globals hasattr hash
# hex id int isinstance issubclass iter len list locals map max min next
# object oct ord pow print property range repr reversed round set setattr
# slice sorted str sum super tuple type vars zip
#
# Further, all methods of: list, dict, str, set?
# todo: make these more robust by not applying the Python version if a JS version exists.
class Parser3(Parser2):
""" Parser to transcompile Python to JS, allowing more Pythonic
code, like ``self``, ``print()``, ``len()``, list methods, etc.
"""
NAME_MAP = {'self': 'this', }
NAME_MAP.update(Parser2.NAME_MAP)
## Hardcore functions (hide JS functions with the same name)
def function_isinstance(self, node):
if len(node.args) != 2:
raise JSError('isinstance() expects two arguments.')
ob = unify(self.parse(node.args[0]))
cls = unify(self.parse(node.args[1]))
if cls[0] in '"\'':
cls = cls[1:-1] # remove quotes
BASIC_TYPES = ('number', 'boolean', 'string', 'function', 'array',
'object', 'null', 'undefined')
MAP = {'[int, float]': 'number', '[float, int]': 'number', 'float': 'number',
'str': 'string', 'basestring': 'string', 'string_types': 'string',
'bool': 'boolean',
'FunctionType': 'function', 'types.FunctionType': 'function',
'list': 'array', 'tuple': 'array',
'[list, tuple]': 'array', '[tuple, list]': 'array',
'dict': 'object',
}
cmp = MAP.get(cls, cls)
if cmp.lower() in BASIC_TYPES:
# Basic type, use Object.prototype.toString
# http://stackoverflow.com/questions/11108877
return ["({}).toString.call(",
ob,
").match(/\s([a-zA-Z]+)/)[1].toLowerCase() === ",
repr(cmp.lower())
]
else:
# User defined type, use instanceof
# http://tobyho.com/2011/01/28/checking-types-in-javascript/
cmp = unify(cls)
if cmp[0] == '(':
raise JSError('isinstance() can only compare to simple types')
return ob, " instanceof ", cmp
def function_issubclass(self, node):
# issubclass only needs to work on custom classes
if len(node.args) != 2:
raise JSError('issubclass() expects two arguments.')
cls1 = unify(self.parse(node.args[0]))
cls2 = unify(self.parse(node.args[1]))
if cls2 == 'object':
cls2 = 'Object'
return '(%s.prototype instanceof %s)' % (cls1, cls2)
def function_hasattr(self, node):
if len(node.args) == 2:
ob = unify(self.parse(node.args[0]))
name = unify(self.parse(node.args[1]))
dummy1 = self.dummy()
t = "((%s=%s) !== undefined && %s !== null && %s[%s] !== undefined)"
return t % (dummy1, ob, dummy1, dummy1, name)
else:
raise JSError('hasattr() expects two arguments.')
def function_getattr(self, node):
is_ok = "(ob !== undefined && ob !== null && ob[name] !== undefined)"
if len(node.args) == 2:
ob = unify(self.parse(node.args[0]))
name = unify(self.parse(node.args[1]))
func = "(function (ob, name) {if %s {return ob[name];} " % is_ok
func += "else {var e = Error(name); e.name='AttributeError'; throw e;}})"
return func + '(%s, %s)' % (ob, name)
elif len(node.args) == 3:
ob = unify(self.parse(node.args[0]))
name = unify(self.parse(node.args[1]))
default = unify(self.parse(node.args[2]))
func = "(function (ob, name, dflt) {if %s {return ob[name];} " % is_ok
func += "else {return dflt;}})"
return func + '(%s, %s, %s)' % (ob, name, default)
else:
raise JSError('getattr() expects two or three arguments.')
def function_setattr(self, node):
is_ok = "(ob !== undefined && ob !== null && ob[name] !== undefined)"
if len(node.args) == 3:
ob = unify(self.parse(node.args[0]))
name = unify(self.parse(node.args[1]))
value = unify(self.parse(node.args[2]))
return '%s[%s] = %s' % (ob, name, value)
else:
raise JSError('setattr() expects three arguments.')
def function_delattr(self, node):
if len(node.args) == 2:
ob = unify(self.parse(node.args[0]))
name = unify(self.parse(node.args[1]))
return 'delete %s[%s]' % (ob, name)
else:
raise JSError('delattr() expects two arguments.')
def function_print(self, node):
# Process keywords
sep, end = '" "', ''
for kw in node.keywords:
if kw.arg == 'sep':
sep = ''.join(self.parse(kw.value))
elif kw.arg == 'end':
end = ''.join(self.parse(kw.value))
elif kw.arg in ('file', 'flush'):
raise JSError('print() file and flush args not supported')
else:
raise JSError('Invalid argument for print(): %r' % kw.arg)
# Combine args
args = [unify(self.parse(arg)) for arg in node.args]
end = (" + %s" % end) if (args and end and end != '\n') else ''
combiner = ' + %s + ' % sep
args_concat = combiner.join(args)
return 'console.log(' + args_concat + end + ')'
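# Hedged illustration: print() with keyword arguments compiles to a single
# console.log call (output shown approximately, via the py2js helper
# mentioned above):
#
#     py2js("print(a, b, sep='-')")
#     # -> console.log(a + '-' + b)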
def function_len(self, node):
if len(node.args) == 1:
return unify(self.parse(node.args[0])), '.length'
else:
return None # don't apply this feature
def function_max(self, node):
if len(node.args) == 0:
raise JSError('max() needs at least one argument')
elif len(node.args) == 1:
arg = ''.join(self.parse(node.args[0]))
return 'Math.max.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.args])
return 'Math.max(', args, ')'
def function_min(self, node):
if len(node.args) == 0:
raise JSError('min() needs at least one argument')
elif len(node.args) == 1:
arg = ''.join(self.parse(node.args[0]))
return 'Math.min.apply(null, ', arg, ')'
else:
args = ', '.join([unify(self.parse(arg)) for arg in node.args])
return 'Math.min(', args, ')'
def function_callable(self, node):
if len(node.args) == 1:
arg = unify(self.parse(node.args[0]))
return '(typeof %s === "function")' % arg
else:
raise JSError('callable() needs exactly one argument')
def function_chr(self, node):
if len(node.args) == 1:
arg = ''.join(self.parse(node.args[0]))
return 'String.fromCharCode(%s)' % arg
else:
raise JSError('chr() needs exactly one argument')
def function_ord(self, node):
if len(node.args) == 1:
arg = ''.join(self.parse(node.args[0]))
return '%s.charCodeAt(0)' % arg
else:
raise JSError('ord() needs exactly one argument')
def function_dict(self, node):
if len(node.args) == 0:
return '{}'
if len(node.args) == 1:
code = '(function(x) {var t, i, keys, r={};'
code += 'if (Array.isArray(x)) {'
code += 'for (i=0; i<x.length; i++) {t=x[i]; r[t[0]] = t[1];} return r;'
code += '} else {'
code += 'keys = Object.keys(x); for (i=0; i<keys.length; i++) {t=keys[i]; r[t] = x[t];} return r;}})'
return code + '(%s)' % ''.join(self.parse(node.args[0]))
else:
raise JSError('dict() needs zero or one argument')
def function_list(self, node):
if len(node.args) == 0:
return '[]'
if len(node.args) == 1:
code = '(function(x) {var r=[];'
code += 'if (typeof x==="object" && !Array.isArray(x)) {x=Object.keys(x)}'
code += 'for (var i=0; i<x.length; i++) {r.push(x[i]);} return r;})'
return code + '(%s)' % ''.join(self.parse(node.args[0]))
else:
raise JSError('list() needs zero or one argument')
def function_tuple(self, node):
return self.function_list(node)
def function_range(self, node):
fun = 'function (start, end, step) {var i, res = []; for (i=start; i<end; i+=step) {res.push(i);} return res;}'
if len(node.args) == 1:
end = unify(self.parse(node.args[0]))
return '(%s)(0, %s, 1)' % (fun, end)
elif len(node.args) == 2:
start = unify(self.parse(node.args[0]))
end = unify(self.parse(node.args[1]))
return '(%s)(%s, %s, 1)' % (fun, start, end)
elif len(node.args) == 3:
start = unify(self.parse(node.args[0]))
end = unify(self.parse(node.args[1]))
step = ''.join(self.parse(node.args[2]))
if step.lstrip('+-').isnumeric() and float(step) < 0:
fun = fun.replace('<', '>')
return '(%s)(%s, %s, %s)' % (fun, start, end, step)
else:
raise JSError('range() needs 1, 2 or 3 arguments')
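# Hedged illustration: range() compiles to an inline JS helper that is
# immediately invoked, e.g. (approximate output):
#
#     py2js("range(2, 10, 2)")
#     # -> (function (start, end, step) {var i, res = [];
#     #     for (i=start; i<end; i+=step) {res.push(i);} return res;})(2, 10, 2)
#
# A negative literal step flips the loop comparison from '<' to '>'.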
## Normal functions (can be overloaded)
def function_pow(self, node):
if len(node.args) == 2:
self.vars_for_functions['pow'] = 'Math.pow'
return None
else:
raise JSError('pow() needs exactly two arguments')
def function_sum(self, node):
if len(node.args) == 1:
code = 'function (x) {return x.reduce(function(a, b) {return a + b;});}'
self.vars_for_functions['sum'] = code
return None
else:
raise JSError('sum() needs exactly one argument')
def function_round(self, node):
if len(node.args) == 1:
self.vars_for_functions['round'] = 'Math.round'
else:
raise JSError('round() needs exactly one argument')
def function_int(self, node):
# No need to turn into number first
if len(node.args) == 1:
code = 'function (x) {return x<0 ? Math.ceil(x): Math.floor(x);}'
self.vars_for_functions['int'] = code
else:
raise JSError('int() needs one argument')
def function_float(self, node):
if len(node.args) == 1:
self.vars_for_functions['float'] = 'Number'
else:
raise JSError('float() needs one argument')
def function_str(self, node):
if len(node.args) in (0, 1):
self.vars_for_functions['str'] = 'String'
else:
raise JSError('str() needs zero or one argument')
def function_repr(self, node):
if len(node.args) == 1:
# code = 'function (x) {if (typeof x === "object") {return JSON.stringify(x);}'
# code += ' else if (typeof x === "string") {return "\'" + x + "\'";}'
# code += ' else {return x.toString();}}'
self.vars_for_functions['repr'] = 'JSON.stringify'
else:
raise JSError('repr() needs one argument')
def function_bool(self, node):
if len(node.args) == 1:
self._wrap_truthy(ast.Name('x', '')) # trigger _truthy function declaration
self.vars_for_functions['bool'] = 'function (x) {return Boolean(_truthy(x));}'
else:
raise JSError('bool() needs one argument')
def function_abs(self, node):
if len(node.args) == 1:
self.vars_for_functions['abs'] = 'Math.abs'
else:
raise JSError('abs() needs one argument')
def function_divmod(self, node):
if len(node.args) == 2:
code = 'function (x, y) {var m = x % y; return [(x-m)/y, m];}'
self.vars_for_functions['divmod'] = code
else:
raise JSError('divmod() needs two arguments')
def function_all(self, node):
if len(node.args) == 1:
self._wrap_truthy(ast.Name('x', '')) # trigger _truthy function declaration
code = 'function (x) {for (var i=0; i<x.length; i++) {if (!_truthy(x[i])){return false}} return true;}'
self.vars_for_functions['all'] = code
else:
raise JSError('all() needs one argument')
def function_any(self, node):
if len(node.args) == 1:
self._wrap_truthy(ast.Name('x', '')) # trigger _truthy function declaration
code = 'function (x) {for (var i=0; i<x.length; i++) {if (_truthy(x[i])){return true}} return false;}'
self.vars_for_functions['any'] = code
else:
raise JSError('any() needs one argument')
def function_enumerate(self, node):
if len(node.args) == 1:
code = 'function (iter) { var i, res=[];'
code += self._make_iterable('iter', 'iter', False)
code += 'for (i=0; i<iter.length; i++) {res.push([i, iter[i]]);}'
code += 'return res;}'
self.vars_for_functions['enumerate'] = code
else:
raise JSError('enumerate() needs one argument')
def function_zip(self, node):
if len(node.args) == 2:
code = 'function (iter1, iter2) { var i, res=[];'
code += self._make_iterable('iter1', 'iter1', False)
code += self._make_iterable('iter2', 'iter2', False)
code += 'var len = Math.min(iter1.length, iter2.length);'
code += 'for (i=0; i<len; i++) {res.push([iter1[i], iter2[i]]);}'
code += 'return res;}'
self.vars_for_functions['zip'] = code
else:
raise JSError('zip() needs two arguments')
def function_reversed(self, node):
if len(node.args) == 1:
code = 'function (iter) {'
code += self._make_iterable('iter', 'iter', False)
code += 'return iter.slice().reverse();}'
self.vars_for_functions['reversed'] = code
else:
raise JSError('reversed() needs one argument')
def function_sorted(self, node):
if len(node.args) == 1:
code = 'function (iter) {'
code += self._make_iterable('iter', 'iter', False)
code += 'return iter.slice().sort();}'
self.vars_for_functions['sorted'] = code
else:
raise JSError('sorted() needs one argument')
def function_filter(self, node):
if len(node.args) == 2:
code = 'function (func, iter) {'
code += 'if (typeof func === "undefined" || func === null) {func = function(x) {return x;}}'
code += 'return iter.filter(func);}'
self.vars_for_functions['filter'] = code
else:
raise JSError('filter() needs two arguments')
def function_map(self, node):
if len(node.args) == 2:
code = 'function (func, iter) {return iter.map(func);}'
self.vars_for_functions['map'] = code
else:
raise JSError('map() needs two arguments')
## List methods
def method_append(self, node, base):
if len(node.args) == 1:
code = []
code.append('(%s.append || %s.push).apply(%s, [' % (base, base, base))
code += self.parse(node.args[0])
code.append('])')
return code
def method_remove(self, node, base):
if len(node.args) == 1:
code = []
remove_func = 'function (x) {this.splice(this.indexOf(x), 1);}'
code.append('(%s.remove || %s).apply(%s, [' % (base, remove_func, base))
code += self.parse(node.args[0])
code.append('])')
return code
## Dict methods
def method_get(self, node, base):
if len(node.args) in (1, 2):
# Get name to call object - use simple name if we can
ob_name = base
ob_name1 = base
if not base.isalnum():
dummy = self.dummy()
ob_name = dummy
ob_name1 = '(%s=%s)' % (dummy, base)
# Get args
key = unify(self.parse(node.args[0]))
default = 'null'
normal_args = ''.join(self.parse(node.args[0]))
if len(node.args) == 2:
default = unify(self.parse(node.args[1]))
normal_args += ', ' + ''.join(self.parse(node.args[1]))
# Compose
dict_get = '(%s[%s] || %s)' % (ob_name, key, default)
normal_get = '%s.get(%s)' % (ob_name, normal_args)
return '(/*py-dict.get*/typeof %s.get==="function" ? %s : %s)' % (
ob_name1, normal_get, dict_get)
def method_keys(self, node, base):
if len(node.args) == 0:
return 'Object.keys(%s)' % base
## Str methods
def method_startswith(self, node, base):
if len(node.args) == 1:
arg = unify(self.parse(node.args[0]))
return unify(base), '.indexOf(', arg, ') == 0'
## Extra functions / methods
def method_time(self, node, base): # time.time()
if base == 'time':
if len(node.args) == 0:
return '((new Date()).getTime() / 1000)'
else:
raise JSError('time() needs no argument')
def method_perf_counter(self, node, base): # time.perf_counter()
if base == 'time':
if len(node.args) == 0:
# Work in nodejs and browser
dummy = self.dummy()
return '(typeof(process) === "undefined" ? performance.now()*1e-3 : ((%s=process.hrtime())[0] + %s[1]*1e-9))' % (dummy, dummy)
else:
raise JSError('perf_counter() needs no argument')
| bsd-2-clause |
jonycgn/scipy | scipy/stats/tests/test_binned_statistic.py | 50 | 8793 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, run_module_suite
from scipy.stats import (binned_statistic, binned_statistic_2d,
binned_statistic_dd)
from scipy._lib.six import u
from common_tests import check_named_results
class TestBinnedStatistic(object):
@classmethod
def setup_class(cls):
np.random.seed(9865)
cls.x = np.random.random(100)
cls.y = np.random.random(100)
cls.v = np.random.random(100)
cls.X = np.random.random((100, 3))
def test_1d_count(self):
x = self.x
v = self.v
count1, edges1, bc = binned_statistic(x, v, 'count', bins=10)
count2, edges2 = np.histogram(x, bins=10)
assert_array_almost_equal(count1, count2)
assert_array_almost_equal(edges1, edges2)
def test_1d_result_attributes(self):
x = self.x
v = self.v
res = binned_statistic(x, v, 'count', bins=10)
attributes = ('statistic', 'bin_edges', 'binnumber')
check_named_results(res, attributes)
def test_1d_sum(self):
x = self.x
v = self.v
sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10)
sum2, edges2 = np.histogram(x, bins=10, weights=v)
assert_array_almost_equal(sum1, sum2)
assert_array_almost_equal(edges1, edges2)
def test_1d_mean(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_1d_std(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_1d_median(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_1d_bincode(self):
x = self.x[:20]
v = self.v[:20]
count1, edges1, bc = binned_statistic(x, v, 'count', bins=3)
bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1,
1, 2, 1])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_array_almost_equal(bc, bc2)
assert_array_almost_equal(bcount, count1)
def test_1d_range_keyword(self):
# Regression test for gh-3063, range can be (min, max) or [(min, max)]
np.random.seed(9865)
x = np.arange(30)
data = np.random.random(30)
mean, bins, _ = binned_statistic(x[:15], data[:15])
mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)])
mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14))
assert_array_almost_equal(mean, mean_range)
assert_array_almost_equal(bins, bins_range)
assert_array_almost_equal(mean, mean_range2)
assert_array_almost_equal(bins, bins_range2)
def test_2d_count(self):
x = self.x
y = self.y
v = self.v
count1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'count', bins=5)
count2, binx2, biny2 = np.histogram2d(x, y, bins=5)
assert_array_almost_equal(count1, count2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_result_attributes(self):
x = self.x
y = self.y
v = self.v
res = binned_statistic_2d(x, y, v, 'count', bins=5)
attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber')
check_named_results(res, attributes)
def test_2d_sum(self):
x = self.x
y = self.y
v = self.v
sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5)
sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v)
assert_array_almost_equal(sum1, sum2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_mean(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_mean_unicode(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, u('mean'), bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_std(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_median(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'median', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.median, bins=5)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(binx1, binx2)
assert_array_almost_equal(biny1, biny2)
def test_2d_bincode(self):
x = self.x[:20]
y = self.y[:20]
v = self.v[:20]
count1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'count', bins=3)
bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16,
6, 11, 16, 6, 6, 11, 8])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_array_almost_equal(bc, bc2)
count1adj = count1[count1.nonzero()]
assert_array_almost_equal(bcount, count1adj)
def test_dd_count(self):
X = self.X
v = self.v
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
count2, edges2 = np.histogramdd(X, bins=3)
assert_array_almost_equal(count1, count2)
assert_array_almost_equal(edges1, edges2)
def test_dd_result_attributes(self):
X = self.X
v = self.v
res = binned_statistic_dd(X, v, 'count', bins=3)
attributes = ('statistic', 'bin_edges', 'binnumber')
check_named_results(res, attributes)
def test_dd_sum(self):
X = self.X
v = self.v
sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3)
sum2, edges2 = np.histogramdd(X, bins=3, weights=v)
assert_array_almost_equal(sum1, sum2)
assert_array_almost_equal(edges1, edges2)
def test_dd_mean(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_dd_std(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_dd_median(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3)
assert_array_almost_equal(stat1, stat2)
assert_array_almost_equal(edges1, edges2)
def test_dd_bincode(self):
X = self.X[:20]
v = self.v[:20]
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92,
32, 36, 91, 43, 87, 81, 81])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_array_almost_equal(bc, bc2)
count1adj = count1[count1.nonzero()]
assert_array_almost_equal(bcount, count1adj)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
Shraddha512/servo | tests/wpt/run.py | 13 | 1745 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys, os, argparse
here = os.path.split(__file__)[0]
servo_root = os.path.abspath(os.path.join(here, "..", ".."))
def wptsubdir(*args):
return os.path.join(here, *args)
# Imports
sys.path.append(wptsubdir("web-platform-tests"))
sys.path.append(wptsubdir("web-platform-tests", "tools", "scripts"))
from wptrunner import wptrunner, wptcommandline
import manifest
def update_manifest():
manifest.update_manifest(wptsubdir("web-platform-tests"),
rebuild=False,
experimental_include_local_changes=True,
path=wptsubdir("metadata", "MANIFEST.json"))
return True
def run_tests(**kwargs):
if not os.path.isfile(wptsubdir("metadata", "MANIFEST.json")):
raise Exception("Manifest not found. Please use --update-manifest in WPTARGS to create one")
wptrunner.setup_logging(kwargs, {"raw": sys.stdout})
return wptrunner.run_tests(**kwargs)
def set_defaults(args):
args.include_manifest = args.include_manifest if args.include_manifest else wptsubdir("include.ini")
args.product = "servo"
rv = vars(args)
wptcommandline.check_args(rv)
return rv
def main():
parser = wptcommandline.create_parser()
parser.add_argument('--update-manifest', dest='update_manifest', action='store_true')
args = parser.parse_args()
if args.update_manifest:
return update_manifest()
kwargs = set_defaults(args)
return run_tests(**kwargs)
if __name__ == "__main__":
sys.exit(0 if main() else 1)
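# Typical invocations (illustrative; --update-manifest is defined above,
# while any other flags come from wptrunner's standard argument parser and
# are assumptions here):
#
#     python run.py --update-manifest
#     python run.py --include=dom/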
| mpl-2.0 |
mikedh/trimesh | trimesh/proximity.py | 1 | 19400 | """
proximity.py
---------------
Query mesh-point proximity.
"""
import numpy as np
from . import util
from .grouping import group_min
from .constants import tol, log_time
from .triangles import closest_point as closest_point_corresponding
from .triangles import points_to_barycentric
try:
from scipy.spatial import cKDTree
except BaseException as E:
from .exceptions import closure
cKDTree = closure(E)
def nearby_faces(mesh, points):
"""
For each point find nearby faces relatively quickly.
The closest point on the mesh to the queried point is guaranteed to be
on one of the faces listed.
Does this by finding the nearest vertex on the mesh to each point, and
then returns all the faces that intersect the axis aligned bounding box
centered at the queried point and extending to the nearest vertex.
Parameters
----------
mesh : trimesh.Trimesh
Mesh to query.
points : (n, 3) float
Points in space
Returns
-----------
candidates : (points,) int
Sequence of indexes for mesh.faces
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
# an r-tree containing the axis aligned bounding box for every triangle
rtree = mesh.triangles_tree
# a kd-tree containing every vertex of the mesh
kdtree = cKDTree(mesh.vertices[mesh.referenced_vertices])
# query the distance to the nearest vertex to get AABB of a sphere
distance_vertex = kdtree.query(points)[0].reshape((-1, 1))
distance_vertex += tol.merge
# axis aligned bounds
bounds = np.column_stack((points - distance_vertex,
points + distance_vertex))
# faces that intersect axis aligned bounding box
candidates = [list(rtree.intersection(b)) for b in bounds]
return candidates
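# Minimal usage sketch (added for exposition; assumes trimesh and its
# rtree/scipy dependencies are installed, and uses trimesh.creation.icosphere
# only as a convenient test mesh):
#
#     import numpy as np
#     import trimesh
#     from trimesh.proximity import nearby_faces
#
#     mesh = trimesh.creation.icosphere()
#     points = np.array([[0.0, 0.0, 1.5]])
#     candidates = nearby_faces(mesh, points)
#     # candidates[0] is a list of indexes into mesh.faces near the point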
def closest_point_naive(mesh, points):
"""
Given a mesh and a list of points find the closest point
on any triangle.
Does this by constructing a very large intermediate array and
comparing every point to every triangle.
Parameters
----------
mesh : Trimesh
Takes mesh to have same interfaces as `closest_point`
points : (m, 3) float
Points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distances between point and triangle
triangle_id : (m,) int
Index of triangle containing closest point
"""
# get triangles from mesh
triangles = mesh.triangles.view(np.ndarray)
# establish that input points are sane
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('triangles shape incorrect')
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)')
# create a giant tiled array of each point tiled len(triangles) times
points_tiled = np.tile(points, (1, len(triangles)))
on_triangle = np.array([closest_point_corresponding(
triangles, i.reshape((-1, 3))) for i in points_tiled])
# distance squared
distance_2 = [((i - q)**2).sum(axis=1)
for i, q in zip(on_triangle, points)]
triangle_id = np.array([i.argmin() for i in distance_2])
# closest cartesian point
closest = np.array([g[i] for i, g in zip(triangle_id, on_triangle)])
distance = np.array([g[i] for i, g in zip(triangle_id, distance_2)]) ** .5
return closest, distance, triangle_id
def closest_point(mesh, points):
"""
Given a mesh and a list of points find the closest point
on any triangle.
Parameters
----------
mesh : trimesh.Trimesh
Mesh to query
points : (m, 3) float
Points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distance to mesh.
triangle_id : (m,) int
Index of triangle containing closest point
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
# do a tree-based query for faces near each point
candidates = nearby_faces(mesh, points)
# view triangles as an ndarray so we don't have to recompute
# the MD5 during all of the subsequent advanced indexing
triangles = mesh.triangles.view(np.ndarray)
# create the corresponding list of triangles
# and query points to send to the closest_point function
all_candidates = np.concatenate(candidates)
num_candidates = list(map(len, candidates))
tile_idxs = np.repeat(np.arange(len(points)), num_candidates)
query_point = points[tile_idxs, :]
query_tri = triangles[all_candidates]
# do the computation for closest point
query_close = closest_point_corresponding(query_tri, query_point)
query_group = np.cumsum(num_candidates)[:-1]
# vectors and distances for
# closest point to query point
query_vector = query_point - query_close
query_distance = util.diagonal_dot(query_vector, query_vector)
# get the best two candidate indices by arg-sorting the per-query distances
qds = np.array_split(query_distance, query_group)
idxs = np.int32([qd.argsort()[:2] if len(qd) > 1 else [0, 0] for qd in qds])
idxs[1:] += query_group.reshape(-1, 1)
# points, distances and triangle ids for best two candidates
two_points = query_close[idxs]
two_dists = query_distance[idxs]
two_candidates = all_candidates[idxs]
# the first candidate is the best result for unambiguous cases
result_close = query_close[idxs[:, 0]]
result_tid = two_candidates[:, 0]
result_distance = two_dists[:, 0]
# however: same closest point on two different faces
# find the best one and correct triangle ids if necessary
check_distance = two_dists.ptp(axis=1) < tol.merge
check_magnitude = np.all(np.abs(two_dists) > tol.merge, axis=1)
# mask results where corrections may need to be applied
c_mask = np.bitwise_and(check_distance, check_magnitude)
# get two face normals for the candidate points
normals = mesh.face_normals[two_candidates[c_mask]]
# compute normalized surface-point to query-point vectors
vectors = (query_vector[idxs[c_mask]] /
two_dists[c_mask].reshape(-1, 2, 1) ** 0.5)
# compare enclosed angle for both face normals
dots = (normals * vectors).sum(axis=2)
# take the idx with the most positive angle
# allows for selecting the correct candidate triangle id
c_idxs = dots.argmax(axis=1)
# correct triangle ids where necessary
# closest point and distance remain valid
result_tid[c_mask] = two_candidates[c_mask, c_idxs]
result_distance[c_mask] = two_dists[c_mask, c_idxs]
result_close[c_mask] = two_points[c_mask, c_idxs]
# we were comparing the distance squared so
# now take the square root in one vectorized operation
result_distance **= .5
return result_close, result_distance, result_tid
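# Usage sketch (continuing the hypothetical mesh/points from the example
# above):
#
#     from trimesh.proximity import closest_point
#     closest, distance, tid = closest_point(mesh, points)
#     # closest[i] lies on triangle mesh.faces[tid[i]], and
#     # distance[i] == np.linalg.norm(points[i] - closest[i])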
def signed_distance(mesh, points):
"""
Find the signed distance from a mesh to a list of points.
* Points OUTSIDE the mesh will have NEGATIVE distance
* Points within tol.merge of the surface will have POSITIVE distance
* Points INSIDE the mesh will have POSITIVE distance
Parameters
-----------
mesh : trimesh.Trimesh
Mesh to query.
points : (n, 3) float
Points in space
Returns
----------
signed_distance : (n,) float
Signed distance from point to mesh
"""
# make sure we have a numpy array
points = np.asanyarray(points, dtype=np.float64)
# find the closest point on the mesh to the queried points
closest, distance, triangle_id = closest_point(mesh, points)
# we only care about nonzero distances
nonzero = distance > tol.merge
if not nonzero.any():
return distance
# For closest points that project directly into the closest triangle,
# compute the sign from the triangle normal. Project each point into the
# closest triangle plane.
nonzero = np.where(nonzero)[0]
normals = mesh.face_normals[triangle_id]
projection = (points[nonzero] -
(normals[nonzero].T * np.einsum(
"ij,ij->i",
points[nonzero] - closest[nonzero],
normals[nonzero])).T)
# Determine if the projection lies within the closest triangle
barycentric = points_to_barycentric(
mesh.triangles[triangle_id[nonzero]],
projection)
ontriangle = ~((
(barycentric < -tol.merge) | (barycentric > 1 + tol.merge)
).any(axis=1))
# Where the projection lies within the triangle, compare the vector from
# the projection to the point against the triangle normal to compute the sign
sign = np.sign(np.einsum(
"ij,ij->i",
normals[nonzero[ontriangle]],
points[nonzero[ontriangle]] - projection[ontriangle]))
distance[nonzero[ontriangle]] *= -1.0 * sign
# For all other triangles, resort to raycasting against the entire mesh
inside = mesh.ray.contains_points(points[nonzero[~ontriangle]])
sign = (inside.astype(int) * 2) - 1.0
# apply sign to previously computed distance
distance[nonzero[~ontriangle]] *= sign
return distance
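# Usage sketch demonstrating the documented sign convention (assumes a
# watertight unit icosphere as in the earlier example; values approximate):
#
#     import numpy as np
#     pts = np.array([[0.0, 0.0, 0.0],    # inside  -> positive
#                     [2.0, 0.0, 0.0]])   # outside -> negative
#     d = signed_distance(mesh, pts)
#     assert d[0] > 0 and d[1] < 0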
class ProximityQuery(object):
"""
Proximity queries for the current mesh.
"""
def __init__(self, mesh):
self._mesh = mesh
@log_time
def on_surface(self, points):
"""
Given list of points, for each point find the closest point
on any triangle of the mesh.
Parameters
----------
points : (m,3) float, points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distance to surface
triangle_id : (m,) int
Index of closest triangle for each point.
"""
return closest_point(mesh=self._mesh,
points=points)
def vertex(self, points):
"""
Given a set of points, return the closest vertex index to each point
Parameters
----------
points : (n, 3) float
Points in space
Returns
----------
distance : (n,) float
Distance from source point to vertex.
vertex_id : (n,) int
Index of mesh.vertices for closest vertex.
"""
tree = self._mesh.kdtree
return tree.query(points)
def signed_distance(self, points):
"""
Find the signed distance from a mesh to a list of points.
* Points OUTSIDE the mesh will have NEGATIVE distance
* Points within tol.merge of the surface will have POSITIVE distance
* Points INSIDE the mesh will have POSITIVE distance
Parameters
-----------
points : (n, 3) float
Points in space
Returns
----------
signed_distance : (n,) float
Signed distance from point to mesh.
"""
return signed_distance(self._mesh, points)
def longest_ray(mesh, points, directions):
"""
Find the lengths of the longest rays which do not intersect the mesh
cast from a list of points in the provided directions.
Parameters
-----------
points : (n, 3) float
Points in space.
directions : (n, 3) float
Directions of rays.
Returns
----------
lengths : (n,) float
Lengths of the rays (np.inf where a ray never hits the mesh).
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
directions = np.asanyarray(directions, dtype=np.float64)
if not util.is_shape(directions, (-1, 3)):
raise ValueError('directions must be (n,3)!')
if len(points) != len(directions):
raise ValueError('number of points must equal number of directions!')
faces, rays, locations = mesh.ray.intersects_id(points, directions,
return_locations=True,
multiple_hits=True)
if len(rays) > 0:
distances = np.linalg.norm(locations - points[rays],
axis=1)
else:
distances = np.array([])
# Reject intersections at distance less than tol.planar
rays = rays[distances > tol.planar]
distances = distances[distances > tol.planar]
# Add infinite length for those with no valid intersection
no_intersections = np.setdiff1d(np.arange(len(points)), rays)
rays = np.concatenate((rays, no_intersections))
distances = np.concatenate((distances,
np.repeat(np.inf,
len(no_intersections))))
return group_min(rays, distances)
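# Usage sketch (requires a working ray backend such as rtree or pyembree;
# values are approximate for a unit icosphere):
#
#     origins = mesh.vertices[:3]
#     directions = -mesh.vertex_normals[:3]   # cast into the mesh
#     lengths = longest_ray(mesh, origins, directions)
#     # each length is close to the diameter, ~2.0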
def max_tangent_sphere(mesh,
points,
inwards=True,
normals=None,
threshold=1e-6,
max_iter=100):
"""
Find the center and radius of the sphere which is tangent to
the mesh at the given point and at least one more point with no
non-tangential intersections with the mesh.
Masatomo Inui, Nobuyuki Umezu & Ryohei Shimane (2016)
Shrinking sphere:
A parallel algorithm for computing the thickness of 3D objects,
Computer-Aided Design and Applications, 13:2, 199-207,
DOI: 10.1080/16864360.2015.1084186
Parameters
----------
points : (n, 3) float
Points in space.
inwards : bool
Whether to have the sphere inside or outside the mesh.
normals : (n, 3) float or None
Normals of the mesh at the given points
if is None computed automatically.
Returns
----------
centers : (n,3) float
Centers of spheres
radii : (n,) float
Radii of spheres
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
if normals is not None:
normals = np.asanyarray(normals, dtype=np.float64)
if not util.is_shape(normals, (-1, 3)):
raise ValueError('normals must be (n,3)!')
if len(points) != len(normals):
raise ValueError('number of points must equal number of normals!')
else:
normals = mesh.face_normals[closest_point(mesh, points)[2]]
if inwards:
normals = -normals
# Find initial tangent spheres
distances = longest_ray(mesh, points, normals)
radii = distances * 0.5
not_converged = np.ones(len(points), dtype=bool) # boolean mask
# If ray is infinite, find the vertex which is furthest from our point
# when projected onto the ray. I.e. find v which maximises
# (v-p).n = v.n - p.n.
# We use a loop rather than a vectorised approach to reduce memory cost;
# it also seems to run faster.
for i in np.where(np.isinf(distances))[0]:
projections = np.dot(mesh.vertices - points[i], normals[i])
# If no points lie outside the tangent plane, then the radius is infinite;
# otherwise we have a point outside the tangent plane, so take the one
# with maximal projection
if projections.max() < tol.planar:
radii[i] = np.inf
not_converged[i] = False
else:
vertex = mesh.vertices[projections.argmax()]
radii[i] = (np.dot(vertex - points[i], vertex - points[i]) /
(2 * np.dot(vertex - points[i], normals[i])))
# Compute centers
centers = points + normals * np.nan_to_num(radii.reshape(-1, 1))
centers[np.isinf(radii)] = [np.nan, np.nan, np.nan]
# Our iterative process terminates when the difference in sphere
# radius is less than threshold*D
D = np.linalg.norm(mesh.bounds[1] - mesh.bounds[0])
convergence_threshold = threshold * D
n_iter = 0
while not_converged.sum() > 0 and n_iter < max_iter:
n_iter += 1
n_points, n_dists, n_faces = mesh.nearest.on_surface(
centers[not_converged])
# If the distance to the nearest point is the same as the distance
# to the start point then we are done.
done = np.abs(
n_dists -
np.linalg.norm(
centers[not_converged] -
points[not_converged],
axis=1)) < tol.planar
not_converged[np.where(not_converged)[0][done]] = False
# Otherwise find the radius and center of the sphere tangent to the mesh
# at the point and the nearest point.
diff = n_points[~done] - points[not_converged]
old_radii = radii[not_converged].copy()
# np.einsum produces element wise dot product
radii[not_converged] = (np.einsum('ij, ij->i',
diff,
diff) /
(2 * np.einsum('ij, ij->i',
diff,
normals[not_converged])))
centers[not_converged] = points[not_converged] + \
normals[not_converged] * radii[not_converged].reshape(-1, 1)
# If change in radius is less than threshold we have converged
cvged = old_radii - radii[not_converged] < convergence_threshold
not_converged[np.where(not_converged)[0][cvged]] = False
return centers, radii
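# Editor's sketch (not part of the original module): the tangent-sphere
# radius formula used above, worked through with plain numpy and no mesh.
# For a tangent point p with unit normal n and a second surface point v,
# the sphere tangent at p passing through v has
# radius = |v - p|**2 / (2 * (v - p).dot(n)).
def _example_tangent_sphere_radius():
    """Hedged numeric check of the radius formula."""
    p = np.array([0.0, 0.0, 0.0])   # tangent point
    n = np.array([0.0, 0.0, 1.0])   # unit normal at the tangent point
    v = np.array([1.0, 0.0, 1.0])   # another point on the sphere
    diff = v - p
    radius = np.dot(diff, diff) / (2.0 * np.dot(diff, n))  # -> 1.0
    center = p + n * radius                                # -> [0, 0, 1]
    # Sanity check: v is at distance `radius` from `center`.
    assert np.isclose(np.linalg.norm(v - center), radius)
    return center, radius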
def thickness(mesh,
points,
exterior=False,
normals=None,
method='max_sphere'):
"""
Find the thickness of the mesh at the given points.
    Parameters
    ----------
    mesh : Trimesh object
      Mesh to query.
    points : (n, 3) float
      Points in space.
    exterior : bool
      Whether to compute the exterior thickness
      (a.k.a. reach).
    normals : (n, 3) float or None
      Normals of the mesh at the given points;
      if None, computed automatically.
    method : string
      One of 'max_sphere' or 'ray'.

    Returns
    -------
    thickness : (n,) float
      Thickness at given points.
"""
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)!')
if normals is not None:
normals = np.asanyarray(normals, dtype=np.float64)
if not util.is_shape(normals, (-1, 3)):
raise ValueError('normals must be (n,3)!')
if len(points) != len(normals):
raise ValueError('number of points must equal number of normals!')
else:
normals = mesh.face_normals[closest_point(mesh, points)[2]]
if method == 'max_sphere':
centers, radius = max_tangent_sphere(mesh=mesh,
points=points,
inwards=not exterior,
normals=normals)
thickness = radius * 2
return thickness
elif method == 'ray':
if exterior:
return longest_ray(mesh, points, normals)
else:
return longest_ray(mesh, points, -normals)
else:
raise ValueError('Invalid method, use "max_sphere" or "ray"')
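# Editor's sketch (not part of the original module): hedged usage of
# thickness() on a unit box, again assuming a trimesh-style mesh.
def _example_thickness():
    """Illustrative only: interior thickness at the bottom face center."""
    import trimesh  # assumption, as above
    box = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
    points = np.array([[0.0, 0.0, -0.5]])  # a point on the bottom face
    # 'ray' casts along the inward normal; expect ~1.0 for a unit box.
    return thickness(box, points, method='ray')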
| mit |
MSOpenTech/edx-platform | lms/djangoapps/bulk_email/tests/test_err_handling.py | 12 | 17239 | # -*- coding: utf-8 -*-
"""
Unit tests for handling email sending errors
"""
from itertools import cycle
from celery.states import SUCCESS, RETRY
from django.conf import settings
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.db import DatabaseError
import json
from mock import patch, Mock
from smtplib import SMTPDataError, SMTPServerDisconnected, SMTPConnectError
from bulk_email.models import CourseEmail, SEND_TO_ALL
from bulk_email.tasks import perform_delegate_email_batches, send_course_email
from instructor_task.models import InstructorTask
from instructor_task.subtasks import (
initialize_subtask_info,
SubtaskStatus,
check_subtask_is_valid,
update_subtask_status,
DuplicateTaskException,
MAX_DATABASE_LOCK_RETRIES,
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class EmailTestException(Exception):
"""Mock exception for email testing."""
pass
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestEmailErrors(ModuleStoreTestCase):
"""
Test that errors from sending email are handled properly.
"""
def setUp(self):
super(TestEmailErrors, self).setUp()
course_title = u"ẗëṡẗ title イ乇丂イ ᄊ乇丂丂ムg乇 キo尺 ムレレ тэѕт мэѕѕаБэ"
self.course = CourseFactory.create(display_name=course_title)
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
# load initial content (since we don't run migrations as part of tests):
call_command("loaddata", "course_email_template.json")
self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.send_mail_url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.success_content = {
'course_id': self.course.id.to_deprecated_string(),
'success': True,
}
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_data_err_retry(self, retry, get_conn):
"""
Test that celery handles transient SMTPDataErrors by retrying.
"""
get_conn.return_value.send_messages.side_effect = SMTPDataError(455, "Throttling: Sending rate exceeded")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# Test that we retry upon hitting a 4xx error
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPDataError)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.update_subtask_status')
@patch('bulk_email.tasks.send_course_email.retry')
def test_data_err_fail(self, retry, result, get_conn):
"""
Test that celery handles permanent SMTPDataErrors by failing and not retrying.
"""
# have every fourth email fail due to blacklisting:
get_conn.return_value.send_messages.side_effect = cycle([SMTPDataError(554, "Email address is blacklisted"),
None, None, None])
students = [UserFactory() for _ in xrange(settings.BULK_EMAIL_EMAILS_PER_TASK)]
for student in students:
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
test_email = {
'action': 'Send email',
'send_to': 'all',
'subject': 'test subject for all',
'message': 'test message for all'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# We shouldn't retry when hitting a 5xx error
self.assertFalse(retry.called)
# Test that after the rejected email, the rest still successfully send
((_entry_id, _current_task_id, subtask_status), _kwargs) = result.call_args
self.assertEquals(subtask_status.skipped, 0)
expected_fails = int((settings.BULK_EMAIL_EMAILS_PER_TASK + 3) / 4.0)
self.assertEquals(subtask_status.failed, expected_fails)
self.assertEquals(subtask_status.succeeded, settings.BULK_EMAIL_EMAILS_PER_TASK - expected_fails)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_disconn_err_retry(self, retry, get_conn):
"""
Test that celery handles SMTPServerDisconnected by retrying.
"""
get_conn.return_value.open.side_effect = SMTPServerDisconnected(425, "Disconnecting")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPServerDisconnected)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_conn_err_retry(self, retry, get_conn):
"""
Test that celery handles SMTPConnectError by retrying.
"""
get_conn.return_value.open.side_effect = SMTPConnectError(424, "Bad Connection")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPConnectError)
@patch('bulk_email.tasks.SubtaskStatus.increment')
@patch('bulk_email.tasks.log')
def test_nonexistent_email(self, mock_log, result):
"""
Tests retries when the email doesn't exist
"""
# create an InstructorTask object to pass through
course_id = self.course.id
entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": -1}
with self.assertRaises(CourseEmail.DoesNotExist):
perform_delegate_email_batches(entry.id, course_id, task_input, "action_name") # pylint: disable=no-member
((log_str, __, email_id), __) = mock_log.warning.call_args
self.assertTrue(mock_log.warning.called)
self.assertIn('Failed to get CourseEmail with id', log_str)
self.assertEqual(email_id, -1)
self.assertFalse(result.called)
def test_nonexistent_course(self):
"""
Tests exception when the course in the email doesn't exist
"""
course_id = SlashSeparatedCourseKey("I", "DONT", "EXIST")
email = CourseEmail(course_id=course_id)
email.save()
entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
        # (?i) is the inline regex flag for case-insensitive matching
with self.assertRaisesRegexp(ValueError, r"(?i)course not found"):
perform_delegate_email_batches(entry.id, course_id, task_input, "action_name") # pylint: disable=no-member
def test_nonexistent_to_option(self):
"""
Tests exception when the to_option in the email doesn't exist
"""
email = CourseEmail(course_id=self.course.id, to_option="IDONTEXIST")
email.save()
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
with self.assertRaisesRegexp(Exception, 'Unexpected bulk email TO_OPTION found: IDONTEXIST'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=no-member
def test_wrong_course_id_in_task(self):
"""
Tests exception when the course_id in task is not the same as one explicitly passed in.
"""
email = CourseEmail(course_id=self.course.id, to_option=SEND_TO_ALL)
email.save()
entry = InstructorTask.create("bogus/task/id", "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
with self.assertRaisesRegexp(ValueError, 'does not match task value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=no-member
def test_wrong_course_id_in_email(self):
"""
Tests exception when the course_id in CourseEmail is not the same as one explicitly passed in.
"""
email = CourseEmail(course_id=SlashSeparatedCourseKey("bogus", "course", "id"), to_option=SEND_TO_ALL)
email.save()
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
with self.assertRaisesRegexp(ValueError, 'does not match email value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=no-member
def test_send_email_undefined_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-value"
subtask_status = SubtaskStatus.create(subtask_id)
email_id = 1001
with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find subtasks of instructor task'):
send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_missing_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
different_subtask_id = "bogus-subtask-id-value"
subtask_status = SubtaskStatus.create(different_subtask_id)
bogus_email_id = 1001
with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find status for subtask of instructor task'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_completed_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id, state=SUCCESS)
update_subtask_status(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
new_subtask_status = SubtaskStatus.create(subtask_id)
with self.assertRaisesRegexp(DuplicateTaskException, 'already completed'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
def test_send_email_running_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
update_subtask_status(entry_id, subtask_id, subtask_status)
check_subtask_is_valid(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
with self.assertRaisesRegexp(DuplicateTaskException, 'already being executed'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_retried_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=2)
update_subtask_status(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
# try running with a clean subtask:
new_subtask_status = SubtaskStatus.create(subtask_id)
with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
# try again, with a retried subtask with lower count:
new_subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=1)
with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
def test_send_email_with_locked_instructor_task(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-locked-model"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
bogus_email_id = 1001
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
with patch('instructor_task.subtasks.InstructorTask.save') as mock_task_save:
mock_task_save.side_effect = DatabaseError
with self.assertRaises(DatabaseError):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
self.assertEquals(mock_task_save.call_count, MAX_DATABASE_LOCK_RETRIES)
def test_send_email_undefined_email(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
to_list = ['[email protected]']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-undefined-email"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
bogus_email_id = 1001
with self.assertRaises(CourseEmail.DoesNotExist):
# we skip the call that updates subtask status, since we've not set up the InstructorTask
# for the subtask, and it's not important to the test.
with patch('bulk_email.tasks.update_subtask_status'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
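# Editor's sketch (not part of the original tests): the fault-injection
# pattern test_data_err_fail relies on, shown in isolation. cycle() makes
# every fourth send raise while the rest succeed.
def _example_failure_injection():
    """Hedged demo of the cycle()-based side_effect used above."""
    effects = cycle([SMTPDataError(554, "Email address is blacklisted"),
                     None, None, None])
    outcomes = [next(effects) for _ in range(8)]
    # Two of the eight simulated sends carry an injected failure.
    return sum(1 for outcome in outcomes if outcome is not None)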
| agpl-3.0 |
kidaa/avmplus | test/performance/metricinfo.py | 8 | 2932 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This file contains information about the different performance metrics
# It is a python file that is imported into runtests.py
# Only one variable, metric_info is to be defined in this file.
# metric_info is a dictionary with the primary key being the metric name whose value is another
# dictionary.
# This secondary dictionary MUST define the following (string) keys:
# best : when analyzing multiple iterations, how the "best" value is calculated
# valid values are one of [ min | max | mean | median ]
# note that these are NOT strings, but function references
# The following are optional (string) keys:
# desc : A string description of the metric
# name : Display this name instead of the metric name
# unit : Metric Unit
# largerIsFaster : Boolean indicating whether larger values are considered
# to be faster. Defaults to False
# If a test reports a metric not defined in the metric_info dictionary, min is used as the default
import sys
# add parent dir to python module search path
sys.path.append('..')
from util.runtestUtils import mean, median
metric_info = {
'time': {
'best':min,
'unit':'milliseconds',
},
'compile_time': {
'best':min,
'unit':'seconds',
},
'memory':{
'best':max,
'unit':'k',
},
'size':{
'best':min,
'unit':'bytes',
},
'v8': {
'best':max,
'desc': 'custom v8 normalized metric (hardcoded in the test)',
'largerIsFaster':True
},
'iterations/second':{
'best':max,
'largerIsFaster':True,
},
# steps is a metric output by the avm when compiled with --enable-count-steps
'steps':{
'best':mean,
'desc':'internal steps reported by vm composed of call_count+loop_count. See Bug 568933 for details'
},
# vprof / perfm metrics
'vprof-compile-time': {
'best':min,
'name':'vprof: compile (time)'
},
'vprof-code-size' : {
'best':min,
'name':'vprof: code size (bytes)'
},
'vprof-verify-time' : {
'best':min,
'name':'vprof: verify & IR gen (time)'
},
'vprof-ir-bytes': {
'best':min,
'name':'vprof: mir/lir bytes'
},
'vprof-ir-time': {
'best':min,
'name':'vprof: mir/lir (# of inst)'
},
'vprof-count': {
'best':min,
'name':'vprof: count'
}
}
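# Editor's sketch (not part of the original file): how a harness might
# consume metric_info, using the documented default of `min` for metrics
# that are not listed. `metric` and `values` here are made-up inputs.
def _example_metric_lookup(metric='time', values=(12.0, 10.5, 11.2)):
    info = metric_info.get(metric, {})
    best = info.get('best', min)                          # reducer function
    larger_is_faster = info.get('largerIsFaster', False)
    return best(values), larger_is_faster                 # -> (10.5, False)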
| mpl-2.0 |
IptvBrasilGroup/Cleitonleonelcreton.repository | plugin.video.armagedompirata/mechanize/_firefox3cookiejar.py | 134 | 8345 | """Firefox 3 "cookies.sqlite" cookie persistence.
Copyright 2008 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import logging
import time
from _clientcookie import CookieJar, Cookie, MappingIterator
from _util import isstringlike, experimental
debug = logging.getLogger("mechanize.cookies").debug
class Firefox3CookieJar(CookieJar):
"""Firefox 3 cookie jar.
The cookies are stored in Firefox 3's "cookies.sqlite" format.
Constructor arguments:
filename: filename of cookies.sqlite (typically found at the top level
of a firefox profile directory)
autoconnect: as a convenience, connect to the SQLite cookies database at
Firefox3CookieJar construction time (default True)
policy: an object satisfying the mechanize.CookiePolicy interface
Note that this is NOT a FileCookieJar, and there are no .load(),
.save() or .restore() methods. The database is in sync with the
cookiejar object's state after each public method call.
Following Firefox's own behaviour, session cookies are never saved to
the database.
The file is created, and an sqlite database written to it, if it does
not already exist. The moz_cookies database table is created if it does
not already exist.
"""
# XXX
# handle DatabaseError exceptions
# add a FileCookieJar (explicit .save() / .revert() / .load() methods)
def __init__(self, filename, autoconnect=True, policy=None):
experimental("Firefox3CookieJar is experimental code")
CookieJar.__init__(self, policy)
if filename is not None and not isstringlike(filename):
raise ValueError("filename must be string-like")
self.filename = filename
self._conn = None
if autoconnect:
self.connect()
def connect(self):
import sqlite3 # not available in Python 2.4 stdlib
self._conn = sqlite3.connect(self.filename)
self._conn.isolation_level = "DEFERRED"
self._create_table_if_necessary()
def close(self):
self._conn.close()
def _transaction(self, func):
try:
cur = self._conn.cursor()
try:
result = func(cur)
finally:
cur.close()
except:
self._conn.rollback()
raise
else:
self._conn.commit()
return result
def _execute(self, query, params=()):
return self._transaction(lambda cur: cur.execute(query, params))
def _query(self, query, params=()):
# XXX should we bother with a transaction?
cur = self._conn.cursor()
try:
cur.execute(query, params)
return cur.fetchall()
finally:
cur.close()
def _create_table_if_necessary(self):
self._execute("""\
CREATE TABLE IF NOT EXISTS moz_cookies (id INTEGER PRIMARY KEY, name TEXT,
value TEXT, host TEXT, path TEXT,expiry INTEGER,
lastAccessed INTEGER, isSecure INTEGER, isHttpOnly INTEGER)""")
def _cookie_from_row(self, row):
(pk, name, value, domain, path, expires,
last_accessed, secure, http_only) = row
version = 0
domain = domain.encode("ascii", "ignore")
path = path.encode("ascii", "ignore")
name = name.encode("ascii", "ignore")
value = value.encode("ascii", "ignore")
secure = bool(secure)
# last_accessed isn't a cookie attribute, so isn't added to rest
rest = {}
if http_only:
rest["HttpOnly"] = None
if name == "":
name = value
value = None
initial_dot = domain.startswith(".")
domain_specified = initial_dot
discard = False
if expires == "":
expires = None
discard = True
return Cookie(version, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
rest)
def clear(self, domain=None, path=None, name=None):
CookieJar.clear(self, domain, path, name)
where_parts = []
sql_params = []
if domain is not None:
where_parts.append("host = ?")
sql_params.append(domain)
if path is not None:
where_parts.append("path = ?")
sql_params.append(path)
if name is not None:
where_parts.append("name = ?")
sql_params.append(name)
where = " AND ".join(where_parts)
if where:
where = " WHERE " + where
def clear(cur):
cur.execute("DELETE FROM moz_cookies%s" % where,
tuple(sql_params))
self._transaction(clear)
def _row_from_cookie(self, cookie, cur):
expires = cookie.expires
if cookie.discard:
expires = ""
domain = unicode(cookie.domain)
path = unicode(cookie.path)
name = unicode(cookie.name)
value = unicode(cookie.value)
secure = bool(int(cookie.secure))
if value is None:
value = name
name = ""
last_accessed = int(time.time())
http_only = cookie.has_nonstandard_attr("HttpOnly")
query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""")
pk = query.fetchone()[0]
if pk is None:
pk = 1
return (pk, name, value, domain, path, expires,
last_accessed, secure, http_only)
def set_cookie(self, cookie):
if cookie.discard:
CookieJar.set_cookie(self, cookie)
return
def set_cookie(cur):
# XXX
# is this RFC 2965-correct?
# could this do an UPDATE instead?
row = self._row_from_cookie(cookie, cur)
name, unused, domain, path = row[1:5]
cur.execute("""\
DELETE FROM moz_cookies WHERE host = ? AND path = ? AND name = ?""",
(domain, path, name))
cur.execute("""\
INSERT INTO moz_cookies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""", row)
self._transaction(set_cookie)
def __iter__(self):
# session (non-persistent) cookies
for cookie in MappingIterator(self._cookies):
yield cookie
# persistent cookies
for row in self._query("""\
SELECT * FROM moz_cookies ORDER BY name, path, host"""):
yield self._cookie_from_row(row)
def _cookies_for_request(self, request):
session_cookies = CookieJar._cookies_for_request(self, request)
def get_cookies(cur):
query = cur.execute("SELECT host from moz_cookies")
domains = [row[0] for row in query.fetchall()]
cookies = []
for domain in domains:
cookies += self._persistent_cookies_for_domain(domain,
request, cur)
return cookies
        persistent_cookies = self._transaction(get_cookies)
        return session_cookies + persistent_cookies
def _persistent_cookies_for_domain(self, domain, request, cur):
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
query = cur.execute("""\
SELECT * from moz_cookies WHERE host = ? ORDER BY path""",
(domain,))
cookies = [self._cookie_from_row(row) for row in query.fetchall()]
last_path = None
r = []
for cookie in cookies:
if (cookie.path != last_path and
not self._policy.path_return_ok(cookie.path, request)):
last_path = cookie.path
continue
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
r.append(cookie)
return r
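# Editor's sketch (not part of the original module): hedged usage of
# Firefox3CookieJar. "cookies.sqlite" is a hypothetical path; point it at
# a real Firefox 3 profile file to use it.
def _example_firefox3_usage(path="cookies.sqlite"):
    """Illustrative only: list stored cookies for one domain."""
    jar = Firefox3CookieJar(path)  # autoconnect=True opens the database
    try:
        return [cookie for cookie in jar
                if cookie.domain.endswith("example.com")]
    finally:
        jar.close()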
| gpl-2.0 |
JonnyWong16/plexpy | lib/apscheduler/triggers/cron/expressions.py | 3 | 9184 | """This module contains the expressions applicable for CronTrigger's fields."""
from calendar import monthrange
import re
from apscheduler.util import asint
__all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression',
'WeekdayPositionExpression', 'LastDayOfMonthExpression')
WEEKDAYS = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
MONTHS = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
class AllExpression(object):
value_re = re.compile(r'\*(?:/(?P<step>\d+))?$')
def __init__(self, step=None):
self.step = asint(step)
if self.step == 0:
raise ValueError('Increment must be higher than 0')
def validate_range(self, field_name):
from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES
value_range = MAX_VALUES[field_name] - MIN_VALUES[field_name]
if self.step and self.step > value_range:
raise ValueError('the step value ({}) is higher than the total range of the '
'expression ({})'.format(self.step, value_range))
def get_next_value(self, date, field):
start = field.get_value(date)
minval = field.get_min(date)
maxval = field.get_max(date)
start = max(start, minval)
if not self.step:
next = start
else:
distance_to_next = (self.step - (start - minval)) % self.step
next = start + distance_to_next
if next <= maxval:
return next
def __eq__(self, other):
return isinstance(other, self.__class__) and self.step == other.step
def __str__(self):
if self.step:
return '*/%d' % self.step
return '*'
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.step)
class RangeExpression(AllExpression):
value_re = re.compile(
r'(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$')
def __init__(self, first, last=None, step=None):
super(RangeExpression, self).__init__(step)
first = asint(first)
last = asint(last)
if last is None and step is None:
last = first
if last is not None and first > last:
raise ValueError('The minimum value in a range must not be higher than the maximum')
self.first = first
self.last = last
def validate_range(self, field_name):
from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES
super(RangeExpression, self).validate_range(field_name)
if self.first < MIN_VALUES[field_name]:
raise ValueError('the first value ({}) is lower than the minimum value ({})'
.format(self.first, MIN_VALUES[field_name]))
if self.last is not None and self.last > MAX_VALUES[field_name]:
raise ValueError('the last value ({}) is higher than the maximum value ({})'
.format(self.last, MAX_VALUES[field_name]))
value_range = (self.last or MAX_VALUES[field_name]) - self.first
if self.step and self.step > value_range:
raise ValueError('the step value ({}) is higher than the total range of the '
'expression ({})'.format(self.step, value_range))
def get_next_value(self, date, field):
startval = field.get_value(date)
minval = field.get_min(date)
maxval = field.get_max(date)
# Apply range limits
minval = max(minval, self.first)
maxval = min(maxval, self.last) if self.last is not None else maxval
nextval = max(minval, startval)
# Apply the step if defined
if self.step:
distance_to_next = (self.step - (nextval - minval)) % self.step
nextval += distance_to_next
return nextval if nextval <= maxval else None
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.first == other.first and
self.last == other.last)
def __str__(self):
if self.last != self.first and self.last is not None:
range = '%d-%d' % (self.first, self.last)
else:
range = str(self.first)
if self.step:
return '%s/%d' % (range, self.step)
return range
def __repr__(self):
args = [str(self.first)]
if self.last != self.first and self.last is not None or self.step:
args.append(str(self.last))
if self.step:
args.append(str(self.step))
return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
class MonthRangeExpression(RangeExpression):
value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE)
def __init__(self, first, last=None):
try:
first_num = MONTHS.index(first.lower()) + 1
except ValueError:
raise ValueError('Invalid month name "%s"' % first)
if last:
try:
last_num = MONTHS.index(last.lower()) + 1
except ValueError:
raise ValueError('Invalid month name "%s"' % last)
else:
last_num = None
super(MonthRangeExpression, self).__init__(first_num, last_num)
def __str__(self):
if self.last != self.first and self.last is not None:
return '%s-%s' % (MONTHS[self.first - 1], MONTHS[self.last - 1])
return MONTHS[self.first - 1]
def __repr__(self):
args = ["'%s'" % MONTHS[self.first]]
if self.last != self.first and self.last is not None:
args.append("'%s'" % MONTHS[self.last - 1])
return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
class WeekdayRangeExpression(RangeExpression):
value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE)
def __init__(self, first, last=None):
try:
first_num = WEEKDAYS.index(first.lower())
except ValueError:
raise ValueError('Invalid weekday name "%s"' % first)
if last:
try:
last_num = WEEKDAYS.index(last.lower())
except ValueError:
raise ValueError('Invalid weekday name "%s"' % last)
else:
last_num = None
super(WeekdayRangeExpression, self).__init__(first_num, last_num)
def __str__(self):
if self.last != self.first and self.last is not None:
return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last])
return WEEKDAYS[self.first]
def __repr__(self):
args = ["'%s'" % WEEKDAYS[self.first]]
if self.last != self.first and self.last is not None:
args.append("'%s'" % WEEKDAYS[self.last])
return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
class WeekdayPositionExpression(AllExpression):
options = ['1st', '2nd', '3rd', '4th', '5th', 'last']
value_re = re.compile(r'(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))' %
'|'.join(options), re.IGNORECASE)
def __init__(self, option_name, weekday_name):
super(WeekdayPositionExpression, self).__init__(None)
try:
self.option_num = self.options.index(option_name.lower())
except ValueError:
raise ValueError('Invalid weekday position "%s"' % option_name)
try:
self.weekday = WEEKDAYS.index(weekday_name.lower())
except ValueError:
raise ValueError('Invalid weekday name "%s"' % weekday_name)
def get_next_value(self, date, field):
# Figure out the weekday of the month's first day and the number of days in that month
first_day_wday, last_day = monthrange(date.year, date.month)
# Calculate which day of the month is the first of the target weekdays
first_hit_day = self.weekday - first_day_wday + 1
if first_hit_day <= 0:
first_hit_day += 7
# Calculate what day of the month the target weekday would be
if self.option_num < 5:
target_day = first_hit_day + self.option_num * 7
else:
target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7
if target_day <= last_day and target_day >= date.day:
return target_day
def __eq__(self, other):
return (super(WeekdayPositionExpression, self).__eq__(other) and
self.option_num == other.option_num and self.weekday == other.weekday)
def __str__(self):
return '%s %s' % (self.options[self.option_num], WEEKDAYS[self.weekday])
def __repr__(self):
return "%s('%s', '%s')" % (self.__class__.__name__, self.options[self.option_num],
WEEKDAYS[self.weekday])
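# Editor's sketch (not part of the original module): the calendar math from
# WeekdayPositionExpression.get_next_value in isolation, using
# calendar.monthrange's Monday=0 convention for both weekday arguments.
def _example_nth_weekday(year=2015, month=1, weekday=4, nth=0):
    """Day of month of the (nth+1)-th `weekday`; None if it doesn't exist."""
    first_day_wday, last_day = monthrange(year, month)
    first_hit_day = weekday - first_day_wday + 1
    if first_hit_day <= 0:
        first_hit_day += 7
    target_day = first_hit_day + nth * 7
    # Jan 2015 starts on a Thursday, so the first Friday is Jan 2nd.
    return target_day if target_day <= last_day else None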
class LastDayOfMonthExpression(AllExpression):
value_re = re.compile(r'last', re.IGNORECASE)
def __init__(self):
super(LastDayOfMonthExpression, self).__init__(None)
def get_next_value(self, date, field):
return monthrange(date.year, date.month)[1]
def __str__(self):
return 'last'
def __repr__(self):
return "%s()" % self.__class__.__name__
| gpl-3.0 |
aforalee/RRally | tests/unit/plugins/openstack/context/keystone/test_roles.py | 13 | 5273 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally import exceptions
from rally.plugins.openstack.context.keystone import roles
from tests.unit import fakes
from tests.unit import test
CTX = "rally.plugins.openstack.context.keystone.roles"
class RoleGeneratorTestCase(test.TestCase):
def create_default_roles_and_patch_add_remove_functions(self, fc):
fc.keystone().roles.add_user_role = mock.MagicMock()
fc.keystone().roles.remove_user_role = mock.MagicMock()
fc.keystone().roles.create("r1", "test_role1")
fc.keystone().roles.create("r2", "test_role2")
self.assertEqual(2, len(fc.keystone().roles.list()))
@property
def context(self):
return {
"config": {
"roles": [
"test_role1",
"test_role2"
]
},
"admin": {"endpoint": mock.MagicMock()},
"task": mock.MagicMock()
}
@mock.patch("%s.osclients" % CTX)
def test_add_role(self, mock_osclients):
fc = fakes.FakeClients()
mock_osclients.Clients.return_value = fc
self.create_default_roles_and_patch_add_remove_functions(fc)
ctx = roles.RoleGenerator(self.context)
ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
{"id": "u2", "tenant_id": "t2"}]
result = ctx._add_role(mock.MagicMock(),
self.context["config"]["roles"][0])
expected = {"id": "r1", "name": "test_role1"}
self.assertEqual(expected, result)
@mock.patch("%s.osclients" % CTX)
def test_add_role_which_does_not_exist(self, mock_osclients):
fc = fakes.FakeClients()
mock_osclients.Clients.return_value = fc
self.create_default_roles_and_patch_add_remove_functions(fc)
ctx = roles.RoleGenerator(self.context)
ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
{"id": "u2", "tenant_id": "t2"}]
ex = self.assertRaises(exceptions.NoSuchRole, ctx._add_role,
mock.MagicMock(), "unknown_role")
expected = "There is no role with name `unknown_role`."
self.assertEqual(expected, str(ex))
@mock.patch("%s.osclients" % CTX)
def test_remove_role(self, mock_osclients):
role = mock.MagicMock()
fc = fakes.FakeClients()
mock_osclients.Clients.return_value = fc
self.create_default_roles_and_patch_add_remove_functions(fc)
ctx = roles.RoleGenerator(self.context)
ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
{"id": "u2", "tenant_id": "t2"}]
ctx._remove_role(mock.MagicMock(), role)
calls = [
mock.call("u1", role["id"], tenant="t1"),
mock.call("u2", role["id"], tenant="t2"),
]
mock_keystone = mock_osclients.Clients().keystone()
mock_keystone.roles.remove_user_role.assert_has_calls(calls)
@mock.patch("%s.osclients" % CTX)
def test_setup_and_cleanup(self, mock_osclients):
fc = fakes.FakeClients()
mock_osclients.Clients.return_value = fc
self.create_default_roles_and_patch_add_remove_functions(fc)
with roles.RoleGenerator(self.context) as ctx:
ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
{"id": "u2", "tenant_id": "t2"}]
ctx.setup()
calls = [
mock.call("u1", "r1", tenant="t1"),
mock.call("u2", "r1", tenant="t2"),
mock.call("u1", "r2", tenant="t1"),
mock.call("u2", "r2", tenant="t2")
]
fc.keystone().roles.add_user_role.assert_has_calls(calls)
self.assertEqual(
4, fc.keystone().roles.add_user_role.call_count)
self.assertEqual(
0, fc.keystone().roles.remove_user_role.call_count)
self.assertEqual(2, len(ctx.context["roles"]))
self.assertEqual(2, len(fc.keystone().roles.list()))
# Cleanup (called by content manager)
self.assertEqual(2, len(fc.keystone().roles.list()))
self.assertEqual(4, fc.keystone().roles.add_user_role.call_count)
self.assertEqual(4, fc.keystone().roles.remove_user_role.call_count)
calls = [
mock.call("u1", "r1", tenant="t1"),
mock.call("u2", "r1", tenant="t2"),
mock.call("u1", "r2", tenant="t1"),
mock.call("u2", "r2", tenant="t2")
]
fc.keystone().roles.remove_user_role.assert_has_calls(calls)
| apache-2.0 |
gormanb/mongo-python-driver | bson/__init__.py | 14 | 32058 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BSON (Binary JSON) encoding and decoding.
"""
import calendar
import collections
import datetime
import itertools
import re
import struct
import sys
import uuid
from codecs import (utf_8_decode as _utf_8_decode,
utf_8_encode as _utf_8_encode)
from bson.binary import (Binary, OLD_UUID_SUBTYPE,
JAVA_LEGACY, CSHARP_LEGACY,
UUIDLegacy)
from bson.code import Code
from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS
from bson.dbref import DBRef
from bson.errors import (InvalidBSON,
InvalidDocument,
InvalidStringData)
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.py3compat import (b,
PY3,
iteritems,
text_type,
string_type,
reraise)
from bson.regex import Regex
from bson.son import SON, RE_TYPE
from bson.timestamp import Timestamp
from bson.tz_util import utc
try:
from bson import _cbson
_USE_C = True
except ImportError:
_USE_C = False
EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc)
EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0)
BSONNUM = b"\x01" # Floating point
BSONSTR = b"\x02" # UTF-8 string
BSONOBJ = b"\x03" # Embedded document
BSONARR = b"\x04" # Array
BSONBIN = b"\x05" # Binary
BSONUND = b"\x06" # Undefined
BSONOID = b"\x07" # ObjectId
BSONBOO = b"\x08" # Boolean
BSONDAT = b"\x09" # UTC Datetime
BSONNUL = b"\x0A" # Null
BSONRGX = b"\x0B" # Regex
BSONREF = b"\x0C" # DBRef
BSONCOD = b"\x0D" # Javascript code
BSONSYM = b"\x0E" # Symbol
BSONCWS = b"\x0F" # Javascript code with scope
BSONINT = b"\x10" # 32bit int
BSONTIM = b"\x11" # Timestamp
BSONLON = b"\x12" # 64bit int
BSONMIN = b"\xFF" # Min key
BSONMAX = b"\x7F" # Max key
_UNPACK_FLOAT = struct.Struct("<d").unpack
_UNPACK_INT = struct.Struct("<i").unpack
_UNPACK_LENGTH_SUBTYPE = struct.Struct("<iB").unpack
_UNPACK_LONG = struct.Struct("<q").unpack
_UNPACK_TIMESTAMP = struct.Struct("<II").unpack
def _get_int(data, position, dummy0, dummy1):
"""Decode a BSON int32 to python int."""
end = position + 4
return _UNPACK_INT(data[position:end])[0], end
def _get_c_string(data, position, opts):
"""Decode a BSON 'C' string to python unicode string."""
end = data.index(b"\x00", position)
return _utf_8_decode(data[position:end],
opts.unicode_decode_error_handler, True)[0], end + 1
def _get_float(data, position, dummy0, dummy1):
"""Decode a BSON double to python float."""
end = position + 8
return _UNPACK_FLOAT(data[position:end])[0], end
def _get_string(data, position, obj_end, opts):
"""Decode a BSON string to python unicode string."""
length = _UNPACK_INT(data[position:position + 4])[0]
position += 4
if length < 1 or obj_end - position < length:
raise InvalidBSON("invalid string length")
end = position + length - 1
if data[end:end + 1] != b"\x00":
raise InvalidBSON("invalid end of string")
return _utf_8_decode(data[position:end],
opts.unicode_decode_error_handler, True)[0], end + 1
def _get_object(data, position, obj_end, opts):
"""Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef."""
obj_size = _UNPACK_INT(data[position:position + 4])[0]
end = position + obj_size - 1
if data[end:position + obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
if end >= obj_end:
raise InvalidBSON("invalid object length")
obj = _elements_to_dict(data, position + 4, end, opts)
position += obj_size
if "$ref" in obj:
return (DBRef(obj.pop("$ref"), obj.pop("$id", None),
obj.pop("$db", None), obj), position)
return obj, position
def _get_array(data, position, obj_end, opts):
"""Decode a BSON array to python list."""
size = _UNPACK_INT(data[position:position + 4])[0]
end = position + size - 1
if data[end:end + 1] != b"\x00":
raise InvalidBSON("bad eoo")
position += 4
end -= 1
result = []
    # Avoid doing global and attribute lookups in the loop.
append = result.append
index = data.index
getter = _ELEMENT_GETTER
while position < end:
element_type = data[position:position + 1]
# Just skip the keys.
position = index(b'\x00', position) + 1
value, position = getter[element_type](data, position, obj_end, opts)
append(value)
return result, position + 1
def _get_binary(data, position, dummy, opts):
"""Decode a BSON binary to bson.binary.Binary or python UUID."""
length, subtype = _UNPACK_LENGTH_SUBTYPE(data[position:position + 5])
position += 5
if subtype == 2:
length2 = _UNPACK_INT(data[position:position + 4])[0]
position += 4
if length2 != length - 4:
raise InvalidBSON("invalid binary (st 2) - lengths don't match!")
length = length2
end = position + length
if subtype in (3, 4):
# Java Legacy
uuid_representation = opts.uuid_representation
if uuid_representation == JAVA_LEGACY:
java = data[position:end]
value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1])
# C# legacy
elif uuid_representation == CSHARP_LEGACY:
value = uuid.UUID(bytes_le=data[position:end])
# Python
else:
value = uuid.UUID(bytes=data[position:end])
return value, end
# Python3 special case. Decode subtype 0 to 'bytes'.
if PY3 and subtype == 0:
value = data[position:end]
else:
value = Binary(data[position:end], subtype)
return value, end
def _get_oid(data, position, dummy0, dummy1):
"""Decode a BSON ObjectId to bson.objectid.ObjectId."""
end = position + 12
return ObjectId(data[position:end]), end
def _get_boolean(data, position, dummy0, dummy1):
"""Decode a BSON true/false to python True/False."""
end = position + 1
return data[position:end] == b"\x01", end
def _get_date(data, position, dummy, opts):
"""Decode a BSON datetime to python datetime.datetime."""
end = position + 8
millis = _UNPACK_LONG(data[position:end])[0]
diff = ((millis % 1000) + 1000) % 1000
seconds = (millis - diff) / 1000
micros = diff * 1000
if opts.tz_aware:
dt = EPOCH_AWARE + datetime.timedelta(
seconds=seconds, microseconds=micros)
if opts.tzinfo:
dt = dt.astimezone(opts.tzinfo)
else:
dt = EPOCH_NAIVE + datetime.timedelta(
seconds=seconds, microseconds=micros)
return dt, end
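# Editor's sketch (not part of the original module): the millisecond split
# from _get_date in isolation. The double-modulo keeps the sub-second part
# non-negative even for pre-epoch (negative) timestamps.
def _example_millis_split(millis=-1001):
    diff = ((millis % 1000) + 1000) % 1000  # 999
    seconds = (millis - diff) / 1000        # -2 (or -2.0 under true division)
    micros = diff * 1000                    # 999000
    return seconds, micros                  # -2 s + 999000 us == -1.001 s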
def _get_code(data, position, obj_end, opts):
"""Decode a BSON code to bson.code.Code."""
code, position = _get_string(data, position, obj_end, opts)
return Code(code), position
def _get_code_w_scope(data, position, obj_end, opts):
"""Decode a BSON code_w_scope to bson.code.Code."""
code, position = _get_string(data, position + 4, obj_end, opts)
scope, position = _get_object(data, position, obj_end, opts)
return Code(code, scope), position
def _get_regex(data, position, dummy0, opts):
"""Decode a BSON regex to bson.regex.Regex or a python pattern object."""
pattern, position = _get_c_string(data, position, opts)
bson_flags, position = _get_c_string(data, position, opts)
bson_re = Regex(pattern, bson_flags)
return bson_re, position
def _get_ref(data, position, obj_end, opts):
"""Decode (deprecated) BSON DBPointer to bson.dbref.DBRef."""
collection, position = _get_string(data, position, obj_end, opts)
oid, position = _get_oid(data, position, obj_end, opts)
return DBRef(collection, oid), position
def _get_timestamp(data, position, dummy0, dummy1):
"""Decode a BSON timestamp to bson.timestamp.Timestamp."""
end = position + 8
inc, timestamp = _UNPACK_TIMESTAMP(data[position:end])
return Timestamp(timestamp, inc), end
def _get_int64(data, position, dummy0, dummy1):
"""Decode a BSON int64 to bson.int64.Int64."""
end = position + 8
return Int64(_UNPACK_LONG(data[position:end])[0]), end
# Each decoder function's signature is:
# - data: bytes
# - position: int, beginning of object in 'data' to decode
# - obj_end: int, end of object to decode in 'data' if variable-length type
# - opts: a CodecOptions
_ELEMENT_GETTER = {
BSONNUM: _get_float,
BSONSTR: _get_string,
BSONOBJ: _get_object,
BSONARR: _get_array,
BSONBIN: _get_binary,
BSONUND: lambda w, x, y, z: (None, x), # Deprecated undefined
BSONOID: _get_oid,
BSONBOO: _get_boolean,
BSONDAT: _get_date,
BSONNUL: lambda w, x, y, z: (None, x),
BSONRGX: _get_regex,
BSONREF: _get_ref, # Deprecated DBPointer
BSONCOD: _get_code,
BSONSYM: _get_string, # Deprecated symbol
BSONCWS: _get_code_w_scope,
BSONINT: _get_int,
BSONTIM: _get_timestamp,
BSONLON: _get_int64,
BSONMIN: lambda w, x, y, z: (MinKey(), x),
BSONMAX: lambda w, x, y, z: (MaxKey(), x)}
def _element_to_dict(data, position, obj_end, opts):
"""Decode a single key, value pair."""
element_type = data[position:position + 1]
position += 1
element_name, position = _get_c_string(data, position, opts)
value, position = _ELEMENT_GETTER[element_type](data,
position, obj_end, opts)
return element_name, value, position
def _elements_to_dict(data, position, obj_end, opts):
"""Decode a BSON document."""
result = opts.document_class()
end = obj_end - 1
while position < end:
(key, value, position) = _element_to_dict(data, position, obj_end, opts)
result[key] = value
return result
def _bson_to_dict(data, opts):
"""Decode a BSON string to document_class."""
try:
obj_size = _UNPACK_INT(data[:4])[0]
except struct.error as exc:
raise InvalidBSON(str(exc))
if obj_size != len(data):
raise InvalidBSON("invalid object size")
if data[obj_size - 1:obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
try:
return _elements_to_dict(data, 4, obj_size - 1, opts)
except InvalidBSON:
raise
except Exception:
# Change exception type to InvalidBSON but preserve traceback.
_, exc_value, exc_tb = sys.exc_info()
reraise(InvalidBSON, exc_value, exc_tb)
if _USE_C:
_bson_to_dict = _cbson._bson_to_dict
_PACK_FLOAT = struct.Struct("<d").pack
_PACK_INT = struct.Struct("<i").pack
_PACK_LENGTH_SUBTYPE = struct.Struct("<iB").pack
_PACK_LONG = struct.Struct("<q").pack
_PACK_TIMESTAMP = struct.Struct("<II").pack
_LIST_NAMES = tuple(b(str(i)) + b"\x00" for i in range(1000))
def gen_list_name():
"""Generate "keys" for encoded lists in the sequence
b"0\x00", b"1\x00", b"2\x00", ...
The first 1000 keys are returned from a pre-built cache. All
subsequent keys are generated on the fly.
"""
for name in _LIST_NAMES:
yield name
counter = itertools.count(1000)
while True:
yield b(str(next(counter))) + b"\x00"
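# Editor's sketch (not part of the original module): the first few array
# keys produced by gen_list_name().
def _example_gen_list_name():
    # -> [b'0\x00', b'1\x00', b'2\x00']
    return list(itertools.islice(gen_list_name(), 3))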
def _make_c_string_check(string):
"""Make a 'C' string, checking for embedded NUL characters."""
if isinstance(string, bytes):
if b"\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
try:
_utf_8_decode(string, None, True)
return string + b"\x00"
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % string)
else:
if "\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
return _utf_8_encode(string)[0] + b"\x00"
def _make_c_string(string):
"""Make a 'C' string."""
if isinstance(string, bytes):
try:
_utf_8_decode(string, None, True)
return string + b"\x00"
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % string)
else:
return _utf_8_encode(string)[0] + b"\x00"
if PY3:
def _make_name(string):
"""Make a 'C' string suitable for a BSON key."""
# Keys can only be text in python 3.
if "\x00" in string:
raise InvalidDocument("BSON keys / regex patterns must not "
"contain a NUL character")
return _utf_8_encode(string)[0] + b"\x00"
else:
# Keys can be unicode or bytes in python 2.
_make_name = _make_c_string_check
def _encode_float(name, value, dummy0, dummy1):
"""Encode a float."""
return b"\x01" + name + _PACK_FLOAT(value)
if PY3:
def _encode_bytes(name, value, dummy0, dummy1):
"""Encode a python bytes."""
# Python3 special case. Store 'bytes' as BSON binary subtype 0.
return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value
else:
def _encode_bytes(name, value, dummy0, dummy1):
"""Encode a python str (python 2.x)."""
try:
_utf_8_decode(value, None, True)
except UnicodeError:
raise InvalidStringData("strings in documents must be valid "
"UTF-8: %r" % (value,))
return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00"
def _encode_mapping(name, value, check_keys, opts):
"""Encode a mapping type."""
data = b"".join([_element_to_bson(key, val, check_keys, opts)
for key, val in iteritems(value)])
return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00"
def _encode_dbref(name, value, check_keys, opts):
"""Encode bson.dbref.DBRef."""
buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00")
begin = len(buf) - 4
buf += _name_value_to_bson(b"$ref\x00",
value.collection, check_keys, opts)
buf += _name_value_to_bson(b"$id\x00",
value.id, check_keys, opts)
if value.database is not None:
buf += _name_value_to_bson(
b"$db\x00", value.database, check_keys, opts)
for key, val in iteritems(value._DBRef__kwargs):
buf += _element_to_bson(key, val, check_keys, opts)
buf += b"\x00"
buf[begin:begin + 4] = _PACK_INT(len(buf) - begin)
return bytes(buf)
def _encode_list(name, value, check_keys, opts):
"""Encode a list/tuple."""
lname = gen_list_name()
data = b"".join([_name_value_to_bson(next(lname), item,
check_keys, opts)
for item in value])
return b"\x04" + name + _PACK_INT(len(data) + 5) + data + b"\x00"
def _encode_text(name, value, dummy0, dummy1):
"""Encode a python unicode (python 2.x) / str (python 3.x)."""
value = _utf_8_encode(value)[0]
return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00"
def _encode_binary(name, value, dummy0, dummy1):
"""Encode bson.binary.Binary."""
subtype = value.subtype
if subtype == 2:
value = _PACK_INT(len(value)) + value
return b"\x05" + name + _PACK_LENGTH_SUBTYPE(len(value), subtype) + value
def _encode_uuid(name, value, dummy, opts):
"""Encode uuid.UUID."""
uuid_representation = opts.uuid_representation
# Python Legacy Common Case
if uuid_representation == OLD_UUID_SUBTYPE:
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes
# Java Legacy
elif uuid_representation == JAVA_LEGACY:
from_uuid = value.bytes
data = from_uuid[0:8][::-1] + from_uuid[8:16][::-1]
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + data
# C# legacy
elif uuid_representation == CSHARP_LEGACY:
# Microsoft GUID representation.
return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes_le
# New
else:
return b"\x05" + name + b'\x10\x00\x00\x00\x04' + value.bytes
def _encode_objectid(name, value, dummy0, dummy1):
"""Encode bson.objectid.ObjectId."""
return b"\x07" + name + value.binary
def _encode_bool(name, value, dummy0, dummy1):
"""Encode a python boolean (True/False)."""
return b"\x08" + name + (value and b"\x01" or b"\x00")
def _encode_datetime(name, value, dummy0, dummy1):
"""Encode datetime.datetime."""
if value.utcoffset() is not None:
value = value - value.utcoffset()
millis = int(calendar.timegm(value.timetuple()) * 1000 +
value.microsecond / 1000)
return b"\x09" + name + _PACK_LONG(millis)
def _encode_none(name, dummy0, dummy1, dummy2):
"""Encode python None."""
return b"\x0A" + name
def _encode_regex(name, value, dummy0, dummy1):
"""Encode a python regex or bson.regex.Regex."""
flags = value.flags
# Python 2 common case
if flags == 0:
return b"\x0B" + name + _make_c_string_check(value.pattern) + b"\x00"
# Python 3 common case
elif flags == re.UNICODE:
return b"\x0B" + name + _make_c_string_check(value.pattern) + b"u\x00"
else:
sflags = b""
if flags & re.IGNORECASE:
sflags += b"i"
if flags & re.LOCALE:
sflags += b"l"
if flags & re.MULTILINE:
sflags += b"m"
if flags & re.DOTALL:
sflags += b"s"
if flags & re.UNICODE:
sflags += b"u"
if flags & re.VERBOSE:
sflags += b"x"
sflags += b"\x00"
return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags
def _encode_code(name, value, dummy, opts):
"""Encode bson.code.Code."""
cstring = _make_c_string(value)
cstrlen = len(cstring)
if not value.scope:
return b"\x0D" + name + _PACK_INT(cstrlen) + cstring
scope = _dict_to_bson(value.scope, False, opts, False)
full_length = _PACK_INT(8 + cstrlen + len(scope))
return b"\x0F" + name + full_length + _PACK_INT(cstrlen) + cstring + scope
def _encode_int(name, value, dummy0, dummy1):
"""Encode a python int."""
if -2147483648 <= value <= 2147483647:
return b"\x10" + name + _PACK_INT(value)
else:
try:
return b"\x12" + name + _PACK_LONG(value)
except struct.error:
raise OverflowError("BSON can only handle up to 8-byte ints")
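# Editor's sketch (not part of the original module): the int32/int64 split
# in _encode_int. Values within the signed 32-bit range get type byte
# 0x10, anything larger gets 0x12.
def _example_int_width():
    small = _encode_int(b"n\x00", 2147483647, False, None)
    big = _encode_int(b"n\x00", 2147483648, False, None)
    return small[0:1], big[0:1]  # -> (b'\x10', b'\x12')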
def _encode_timestamp(name, value, dummy0, dummy1):
"""Encode bson.timestamp.Timestamp."""
return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time)
def _encode_long(name, value, dummy0, dummy1):
"""Encode a python long (python 2.x)"""
try:
return b"\x12" + name + _PACK_LONG(value)
except struct.error:
raise OverflowError("BSON can only handle up to 8-byte ints")
def _encode_minkey(name, dummy0, dummy1, dummy2):
"""Encode bson.min_key.MinKey."""
return b"\xFF" + name
def _encode_maxkey(name, dummy0, dummy1, dummy2):
"""Encode bson.max_key.MaxKey."""
return b"\x7F" + name
# Each encoder function's signature is:
# - name: utf-8 bytes
# - value: a Python data type, e.g. a Python int for _encode_int
# - check_keys: bool, whether to check for invalid names
# - opts: a CodecOptions
_ENCODERS = {
bool: _encode_bool,
bytes: _encode_bytes,
datetime.datetime: _encode_datetime,
dict: _encode_mapping,
float: _encode_float,
int: _encode_int,
list: _encode_list,
# unicode in py2, str in py3
text_type: _encode_text,
tuple: _encode_list,
type(None): _encode_none,
uuid.UUID: _encode_uuid,
Binary: _encode_binary,
Int64: _encode_long,
Code: _encode_code,
DBRef: _encode_dbref,
MaxKey: _encode_maxkey,
MinKey: _encode_minkey,
ObjectId: _encode_objectid,
Regex: _encode_regex,
RE_TYPE: _encode_regex,
SON: _encode_mapping,
Timestamp: _encode_timestamp,
UUIDLegacy: _encode_binary,
# Special case. This will never be looked up directly.
collections.Mapping: _encode_mapping,
}
_MARKERS = {
5: _encode_binary,
7: _encode_objectid,
11: _encode_regex,
13: _encode_code,
17: _encode_timestamp,
18: _encode_long,
100: _encode_dbref,
127: _encode_maxkey,
255: _encode_minkey,
}
if not PY3:
_ENCODERS[long] = _encode_long
def _name_value_to_bson(name, value, check_keys, opts):
"""Encode a single name, value pair."""
# First see if the type is already cached. KeyError will only ever
# happen once per subtype.
try:
return _ENCODERS[type(value)](name, value, check_keys, opts)
except KeyError:
pass
# Second, fall back to trying _type_marker. This has to be done
# before the loop below since users could subclass one of our
# custom types that subclasses a python built-in (e.g. Binary)
marker = getattr(value, "_type_marker", None)
if isinstance(marker, int) and marker in _MARKERS:
func = _MARKERS[marker]
# Cache this type for faster subsequent lookup.
_ENCODERS[type(value)] = func
return func(name, value, check_keys, opts)
# If all else fails test each base type. This will only happen once for
# a subtype of a supported base type.
for base in _ENCODERS:
if isinstance(value, base):
func = _ENCODERS[base]
# Cache this type for faster subsequent lookup.
_ENCODERS[type(value)] = func
return func(name, value, check_keys, opts)
raise InvalidDocument("cannot convert value of type %s to bson" %
type(value))
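# Dispatch-order sketch (hypothetical subclass, not part of the module):
#
#   class MyInt(int):
#       pass
#   # The first encode of a MyInt misses the exact-type lookup, has no
#   # _type_marker, matches `int` in the isinstance scan, and is then cached
#   # as _ENCODERS[MyInt] = _encode_int so later lookups hit the fast path.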
def _element_to_bson(key, value, check_keys, opts):
"""Encode a single key, value pair."""
if not isinstance(key, string_type):
raise InvalidDocument("documents must have only string keys, "
"key was %r" % (key,))
if check_keys:
if key.startswith("$"):
raise InvalidDocument("key %r must not start with '$'" % (key,))
if "." in key:
raise InvalidDocument("key %r must not contain '.'" % (key,))
name = _make_name(key)
return _name_value_to_bson(name, value, check_keys, opts)
def _dict_to_bson(doc, check_keys, opts, top_level=True):
"""Encode a document to BSON."""
try:
elements = []
if top_level and "_id" in doc:
elements.append(_name_value_to_bson(b"_id\x00", doc["_id"],
check_keys, opts))
for (key, value) in iteritems(doc):
if not top_level or key != "_id":
elements.append(_element_to_bson(key, value,
check_keys, opts))
except AttributeError:
raise TypeError("encoder expected a mapping type but got: %r" % (doc,))
encoded = b"".join(elements)
return _PACK_INT(len(encoded) + 5) + encoded + b"\x00"
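# Byte-layout sketch for _dict_to_bson({'a': 1}, ...), assuming standard
# little-endian BSON packing:
#   \x0c\x00\x00\x00    total length 12 (the 4-byte prefix counts itself)
#   \x10 a \x00         int32 element marker plus c-string key "a"
#   \x01\x00\x00\x00    the value 1
#   \x00                terminating NUL, hence _PACK_INT(len(encoded) + 5)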
if _USE_C:
_dict_to_bson = _cbson._dict_to_bson
_CODEC_OPTIONS_TYPE_ERROR = TypeError(
"codec_options must be an instance of CodecOptions")
def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode BSON data to multiple documents.
`data` must be a string of concatenated, valid, BSON-encoded
documents.
:Parameters:
- `data`: BSON data
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Removed `compile_re` option: PyMongo now always represents BSON regular
expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
docs = []
position = 0
end = len(data) - 1
try:
while position < end:
obj_size = _UNPACK_INT(data[position:position + 4])[0]
if len(data) - position < obj_size:
raise InvalidBSON("invalid object size")
obj_end = position + obj_size - 1
if data[obj_end:position + obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
docs.append(_elements_to_dict(data,
position + 4,
obj_end,
codec_options))
position += obj_size
return docs
except InvalidBSON:
raise
except Exception:
# Change exception type to InvalidBSON but preserve traceback.
_, exc_value, exc_tb = sys.exc_info()
reraise(InvalidBSON, exc_value, exc_tb)
if _USE_C:
decode_all = _cbson.decode_all
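# Hedged usage sketch: round-tripping two concatenated documents through the
# module-level helpers defined above.
#
#   raw = _dict_to_bson({'a': 1}, False, DEFAULT_CODEC_OPTIONS) * 2
#   docs = decode_all(raw)   # -> [{'a': 1}, {'a': 1}]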
def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode BSON data to multiple documents as a generator.
Works similarly to the decode_all function, but yields one document at a
time.
`data` must be a string of concatenated, valid, BSON-encoded
documents.
:Parameters:
- `data`: BSON data
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionadded:: 2.8
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
position = 0
end = len(data) - 1
while position < end:
obj_size = _UNPACK_INT(data[position:position + 4])[0]
elements = data[position:position + obj_size]
position += obj_size
yield _bson_to_dict(elements, codec_options)
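# Hedged usage sketch: unlike decode_all, the generator never materializes the
# whole result list, which suits very large concatenated buffers.
#
#   for doc in decode_iter(raw):   # `raw` as in the decode_all sketch above
#       handle(doc)                # `handle` is a hypothetical callback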
def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode bson data from a file to multiple documents as a generator.
Works similarly to the decode_all function, but reads from the file object
in chunks and parses bson in chunks, yielding one document at a time.
:Parameters:
- `file_obj`: A file object containing BSON data.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionadded:: 2.8
"""
while True:
# Read size of next object.
size_data = file_obj.read(4)
if len(size_data) == 0:
break # Finished with file normally.
elif len(size_data) != 4:
raise InvalidBSON("cut off in middle of objsize")
obj_size = _UNPACK_INT(size_data)[0] - 4
elements = size_data + file_obj.read(obj_size)
yield _bson_to_dict(elements, codec_options)
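# Hedged usage sketch: the loop above reads the 4-byte size prefix first and
# then exactly one document per iteration, so a dump file of any size is
# consumed in roughly constant memory.
#
#   with open('dump.bson', 'rb') as f:   # hypothetical file name
#       for doc in decode_file_iter(f):
#           handle(doc)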
def is_valid(bson):
"""Check that the given string represents valid :class:`BSON` data.
Raises :class:`TypeError` if `bson` is not an instance of
:class:`str` (:class:`bytes` in python 3). Returns ``True``
if `bson` is valid :class:`BSON`, ``False`` otherwise.
:Parameters:
- `bson`: the data to be validated
"""
if not isinstance(bson, bytes):
raise TypeError("BSON data must be an instance of a subclass of bytes")
try:
_bson_to_dict(bson, DEFAULT_CODEC_OPTIONS)
return True
except Exception:
return False
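# Hedged usage sketch:
#
#   is_valid(BSON.encode({'a': 1}))   # True
#   is_valid(b'\x05\x00\x00\x00')     # False: claims 5 bytes but has only 4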
class BSON(bytes):
"""BSON (Binary JSON) data.
"""
@classmethod
def encode(cls, document, check_keys=False,
codec_options=DEFAULT_CODEC_OPTIONS):
"""Encode a document to a new :class:`BSON` instance.
A document can be any mapping type (like :class:`dict`).
Raises :class:`TypeError` if `document` is not a mapping type,
or contains keys that are not instances of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~bson.errors.InvalidDocument` if `document` cannot be
converted to :class:`BSON`.
:Parameters:
- `document`: mapping type representing a document
- `check_keys` (optional): check if keys start with '$' or
contain '.', raising :class:`~bson.errors.InvalidDocument` in
either case
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Replaced `uuid_subtype` option with `codec_options`.
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
return cls(_dict_to_bson(document, check_keys, codec_options))
def decode(self, codec_options=DEFAULT_CODEC_OPTIONS):
"""Decode this BSON data.
By default, returns a BSON document represented as a Python
:class:`dict`. To use a different :class:`MutableMapping` class,
configure a :class:`~bson.codec_options.CodecOptions`::
>>> import collections # From Python standard library.
>>> import bson
>>> from bson.codec_options import CodecOptions
>>> data = bson.BSON.encode({'a': 1})
>>> decoded_doc = bson.BSON.decode(data)
>>> type(decoded_doc)
<type 'dict'>
>>> options = CodecOptions(document_class=collections.OrderedDict)
>>> decoded_doc = bson.BSON.decode(data, codec_options=options)
>>> type(decoded_doc)
<class 'collections.OrderedDict'>
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`.
.. versionchanged:: 3.0
Removed `compile_re` option: PyMongo now always represents BSON
regular expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
`codec_options`.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
"""
if not isinstance(codec_options, CodecOptions):
raise _CODEC_OPTIONS_TYPE_ERROR
return _bson_to_dict(self, codec_options)
def has_c():
"""Is the C extension installed?
"""
return _USE_C
| apache-2.0 |
kived/plyer | plyer/platforms/macosx/wifi.py | 2 | 4929 | from plyer.facades import Wifi
from pyobjus.dylib_manager import load_framework, INCLUDE
from pyobjus import autoclass
load_framework(INCLUDE.Foundation)
load_framework(INCLUDE.CoreWLAN)
CWInterface = autoclass('CWInterface')
CWNetwork = autoclass('CWNetwork')
CWWiFiClient = autoclass('CWWiFiClient')
NSArray = autoclass('NSArray')
NSDictionary = autoclass('NSDictionary')
NSString = autoclass('NSString')
class OSXWifi(Wifi):
names = {}
def _is_enabled(self):
'''
Returns `True` if the Wifi is enabled else returns `False`.
'''
return CWWiFiClient.sharedWiFiClient().interface().powerOn()
def _get_network_info(self, name):
'''
Returns all the network information.
'''
def ns(x):
return NSString.alloc().initWithUTF8String_(x)
accessNetworkType = self.names[name].accessNetworkType
aggregateRSSI = self.names[name].aggregateRSSI
beaconInterval = self.names[name].beaconInterval
bssid = self.names[name].bssid.UTF8String()
countryCode = self.names[name].countryCode
hasInternet = self.names[name].hasInternet
hasInterworkingIE = self.names[name].hasInterworkingIE
hessid = self.names[name].hessid
ibss = self.names[name].ibss
isAdditionalStepRequiredForAccess = \
self.names[name].isAdditionalStepRequiredForAccess
isCarPlayNetwork = self.names[name].isCarPlayNetwork
isEmergencyServicesReachable = \
self.names[name].isEmergencyServicesReachable
isPasspoint = self.names[name].isPasspoint
isPersonalHotspot = self.names[name].isPersonalHotspot
isUnauthenticatedEmergencyServiceAccessible = \
self.names[name].isUnauthenticatedEmergencyServiceAccessible
noiseMeasurement = self.names[name].noiseMeasurement
physicalLayerMode = self.names[name].physicalLayerMode
rssiValue = self.names[name].rssiValue
securityType = self.names[name].securityType
ssid = self.names[name].ssid.UTF8String()
supportsEasyConnect = self.names[name].supportsEasyConnect
supportsWPS = self.names[name].supportsWPS
venueGroup = self.names[name].venueGroup
venueType = self.names[name].venueType
return {'accessNetworkType': accessNetworkType,
'aggregateRSSI': aggregateRSSI,
'beaconInterval': beaconInterval,
'bssid': bssid,
'countryCode': countryCode,
'hasInternet': hasInternet,
'hasInterworkingIE': hasInterworkingIE,
'hessid': hessid,
'ibss': ibss,
'isAdditionalStepRequiredForAccess':
isAdditionalStepRequiredForAccess,
'isCarPlayNetwork': isCarPlayNetwork,
'isEmergencyServicesReachable': isEmergencyServicesReachable,
'isPasspoint': isPasspoint,
'isPersonalHotspot': isPersonalHotspot,
'isUnauthenticatedEmergencyServiceAccessible':
isUnauthenticatedEmergencyServiceAccessible,
'noiseMeasurement': noiseMeasurement,
'physicalLayerMode': physicalLayerMode,
'rssiValue': rssiValue,
'securityType': securityType,
'ssid': ssid,
'supportsEasyConnect': supportsEasyConnect,
'supportsWPS': supportsWPS,
'venueGroup': venueGroup,
'venueType': venueType}
def _start_scanning(self):
'''
Starts scanning for available Wi-Fi networks.
'''
if self._is_enabled():
self.names = {}
c = CWInterface.interface()
scan = c.scanForNetworksWithName_error_(None, None)
cnt = scan.allObjects().count()
for i in range(cnt):
self.names[
scan.allObjects().objectAtIndex_(i).ssid.UTF8String()] \
= scan.allObjects().objectAtIndex_(i)
else:
raise Exception("Wifi not enabled.")
def _get_available_wifi(self):
'''
Returns the name of available networks.
'''
return self.names.keys()
def _connect(self, network, parameters):
'''
Expects 2 parameters:
- network: name/ssid of the network.
- parameters: dict that must contain a 'password' key.
'''
password = parameters['password']
network_object = self.names[network]
CWInterface.interface().associateToNetwork_password_error_(
network_object,
password,
None)
return
def _disconnect(self):
'''
Disconnect from network.
'''
CWInterface.interface().disassociate()
return
def instance():
return OSXWifi()
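# Hedged usage sketch (macOS only; assumes pyobjus and the CoreWLAN framework
# are available; the public Wifi facade normally wraps these underscore
# methods):
#
#   wifi = instance()
#   if wifi._is_enabled():
#       wifi._start_scanning()
#       print wifi._get_available_wifi()   # SSIDs collected in self.names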
| mit |
evensonbryan/yocto-autobuilder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/test/test_sslverify.py | 5 | 21662 | # Copyright 2005 Divmod, Inc. See LICENSE file for details
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._sslverify}.
"""
import itertools
try:
from OpenSSL import SSL
from OpenSSL.crypto import PKey, X509, X509Req
from OpenSSL.crypto import TYPE_RSA
from twisted.internet import _sslverify as sslverify
except ImportError:
pass
from twisted.trial import unittest
from twisted.internet import protocol, defer, reactor
from twisted.python.reflect import objgrep, isSame
from twisted.python import log
from twisted.internet.error import CertificateError, ConnectionLost
from twisted.internet import interfaces
# A couple of static PEM-format certificates to be used by various tests.
A_HOST_CERTIFICATE_PEM = """
-----BEGIN CERTIFICATE-----
MIIC2jCCAkMCAjA5MA0GCSqGSIb3DQEBBAUAMIG0MQswCQYDVQQGEwJVUzEiMCAG
A1UEAxMZZXhhbXBsZS50d2lzdGVkbWF0cml4LmNvbTEPMA0GA1UEBxMGQm9zdG9u
MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMRYwFAYDVQQIEw1NYXNzYWNo
dXNldHRzMScwJQYJKoZIhvcNAQkBFhhub2JvZHlAdHdpc3RlZG1hdHJpeC5jb20x
ETAPBgNVBAsTCFNlY3VyaXR5MB4XDTA2MDgxNjAxMDEwOFoXDTA3MDgxNjAxMDEw
OFowgbQxCzAJBgNVBAYTAlVTMSIwIAYDVQQDExlleGFtcGxlLnR3aXN0ZWRtYXRy
aXguY29tMQ8wDQYDVQQHEwZCb3N0b24xHDAaBgNVBAoTE1R3aXN0ZWQgTWF0cml4
IExhYnMxFjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxJzAlBgkqhkiG9w0BCQEWGG5v
Ym9keUB0d2lzdGVkbWF0cml4LmNvbTERMA8GA1UECxMIU2VjdXJpdHkwgZ8wDQYJ
KoZIhvcNAQEBBQADgY0AMIGJAoGBAMzH8CDF/U91y/bdbdbJKnLgnyvQ9Ig9ZNZp
8hpsu4huil60zF03+Lexg2l1FIfURScjBuaJMR6HiMYTMjhzLuByRZ17KW4wYkGi
KXstz03VIKy4Tjc+v4aXFI4XdRw10gGMGQlGGscXF/RSoN84VoDKBfOMWdXeConJ
VyC4w3iJAgMBAAEwDQYJKoZIhvcNAQEEBQADgYEAviMT4lBoxOgQy32LIgZ4lVCj
JNOiZYg8GMQ6y0ugp86X80UjOvkGtNf/R7YgED/giKRN/q/XJiLJDEhzknkocwmO
S+4b2XpiaZYxRyKWwL221O7CGmtWYyZl2+92YYmmCiNzWQPfP6BOMlfax0AGLHls
fXzCWdG0O/3Lk2SRM0I=
-----END CERTIFICATE-----
"""
A_PEER_CERTIFICATE_PEM = """
-----BEGIN CERTIFICATE-----
MIIC3jCCAkcCAjA6MA0GCSqGSIb3DQEBBAUAMIG2MQswCQYDVQQGEwJVUzEiMCAG
A1UEAxMZZXhhbXBsZS50d2lzdGVkbWF0cml4LmNvbTEPMA0GA1UEBxMGQm9zdG9u
MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMRYwFAYDVQQIEw1NYXNzYWNo
dXNldHRzMSkwJwYJKoZIhvcNAQkBFhpzb21lYm9keUB0d2lzdGVkbWF0cml4LmNv
bTERMA8GA1UECxMIU2VjdXJpdHkwHhcNMDYwODE2MDEwMTU2WhcNMDcwODE2MDEw
MTU2WjCBtjELMAkGA1UEBhMCVVMxIjAgBgNVBAMTGWV4YW1wbGUudHdpc3RlZG1h
dHJpeC5jb20xDzANBgNVBAcTBkJvc3RvbjEcMBoGA1UEChMTVHdpc3RlZCBNYXRy
aXggTGFiczEWMBQGA1UECBMNTWFzc2FjaHVzZXR0czEpMCcGCSqGSIb3DQEJARYa
c29tZWJvZHlAdHdpc3RlZG1hdHJpeC5jb20xETAPBgNVBAsTCFNlY3VyaXR5MIGf
MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCnm+WBlgFNbMlHehib9ePGGDXF+Nz4
CjGuUmVBaXCRCiVjg3kSDecwqfb0fqTksBZ+oQ1UBjMcSh7OcvFXJZnUesBikGWE
JE4V8Bjh+RmbJ1ZAlUPZ40bAkww0OpyIRAGMvKG+4yLFTO4WDxKmfDcrOb6ID8WJ
e1u+i3XGkIf/5QIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAD4Oukm3YYkhedUepBEA
vvXIQhVDqL7mk6OqYdXmNj6R7ZMC8WWvGZxrzDI1bZuB+4aIxxd1FXC3UOHiR/xg
i9cDl1y8P/qRp4aEBNF6rI0D4AxTbfnHQx4ERDAOShJdYZs/2zifPJ6va6YvrEyr
yqDtGhklsWW3ZwBzEh5VEOUp
-----END CERTIFICATE-----
"""
counter = itertools.count().next
def makeCertificate(**kw):
keypair = PKey()
keypair.generate_key(TYPE_RSA, 512)
certificate = X509()
certificate.gmtime_adj_notBefore(0)
certificate.gmtime_adj_notAfter(60 * 60 * 24 * 365) # One year
for xname in certificate.get_issuer(), certificate.get_subject():
for (k, v) in kw.items():
setattr(xname, k, v)
certificate.set_serial_number(counter())
certificate.set_pubkey(keypair)
certificate.sign(keypair, "md5")
return keypair, certificate
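# Hedged usage note: setUp() below exercises this helper; a minimal standalone
# call looks like
#
#   key, cert = makeCertificate(O="Example Org", CN="example")
#   # `key` is an OpenSSL PKey, `cert` a self-signed X509 valid for one year.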
class DataCallbackProtocol(protocol.Protocol):
def dataReceived(self, data):
d, self.factory.onData = self.factory.onData, None
if d is not None:
d.callback(data)
def connectionLost(self, reason):
d, self.factory.onLost = self.factory.onLost, None
if d is not None:
d.errback(reason)
class WritingProtocol(protocol.Protocol):
byte = 'x'
def connectionMade(self):
self.transport.write(self.byte)
def connectionLost(self, reason):
self.factory.onLost.errback(reason)
class OpenSSLOptions(unittest.TestCase):
serverPort = clientConn = None
onServerLost = onClientLost = None
sKey = None
sCert = None
cKey = None
cCert = None
def setUp(self):
"""
Create client and server certificate/key pairs used by the tests.
"""
self.sKey, self.sCert = makeCertificate(
O="Server Test Certificate",
CN="server")
self.cKey, self.cCert = makeCertificate(
O="Client Test Certificate",
CN="client")
def tearDown(self):
if self.serverPort is not None:
self.serverPort.stopListening()
if self.clientConn is not None:
self.clientConn.disconnect()
L = []
if self.onServerLost is not None:
L.append(self.onServerLost)
if self.onClientLost is not None:
L.append(self.onClientLost)
return defer.DeferredList(L, consumeErrors=True)
def loopback(self, serverCertOpts, clientCertOpts,
onServerLost=None, onClientLost=None, onData=None):
if onServerLost is None:
self.onServerLost = onServerLost = defer.Deferred()
if onClientLost is None:
self.onClientLost = onClientLost = defer.Deferred()
if onData is None:
onData = defer.Deferred()
serverFactory = protocol.ServerFactory()
serverFactory.protocol = DataCallbackProtocol
serverFactory.onLost = onServerLost
serverFactory.onData = onData
clientFactory = protocol.ClientFactory()
clientFactory.protocol = WritingProtocol
clientFactory.onLost = onClientLost
self.serverPort = reactor.listenSSL(0, serverFactory, serverCertOpts)
self.clientConn = reactor.connectSSL('127.0.0.1',
self.serverPort.getHost().port, clientFactory, clientCertOpts)
def test_abbreviatingDistinguishedNames(self):
"""
Check that abbreviations used in certificates correctly map to
complete names.
"""
self.assertEqual(
sslverify.DN(CN='a', OU='hello'),
sslverify.DistinguishedName(commonName='a',
organizationalUnitName='hello'))
self.assertNotEquals(
sslverify.DN(CN='a', OU='hello'),
sslverify.DN(CN='a', OU='hello', emailAddress='xxx'))
dn = sslverify.DN(CN='abcdefg')
self.assertRaises(AttributeError, setattr, dn, 'Cn', 'x')
self.assertEqual(dn.CN, dn.commonName)
dn.CN = 'bcdefga'
self.assertEqual(dn.CN, dn.commonName)
def testInspectDistinguishedName(self):
n = sslverify.DN(commonName='common name',
organizationName='organization name',
organizationalUnitName='organizational unit name',
localityName='locality name',
stateOrProvinceName='state or province name',
countryName='country name',
emailAddress='email address')
s = n.inspect()
for k in [
'common name',
'organization name',
'organizational unit name',
'locality name',
'state or province name',
'country name',
'email address']:
self.assertIn(k, s, "%r was not in inspect output." % (k,))
self.assertIn(k.title(), s, "%r was not in inspect output." % (k,))
def testInspectDistinguishedNameWithoutAllFields(self):
n = sslverify.DN(localityName='locality name')
s = n.inspect()
for k in [
'common name',
'organization name',
'organizational unit name',
'state or province name',
'country name',
'email address']:
self.assertNotIn(k, s, "%r was in inspect output." % (k,))
self.assertNotIn(k.title(), s, "%r was in inspect output." % (k,))
self.assertIn('locality name', s)
self.assertIn('Locality Name', s)
def test_inspectCertificate(self):
"""
Test that the C{inspect} method of L{sslverify.Certificate} returns
a human-readable string containing some basic information about the
certificate.
"""
c = sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM)
self.assertEqual(
c.inspect().split('\n'),
["Certificate For Subject:",
" Common Name: example.twistedmatrix.com",
" Country Name: US",
" Email Address: [email protected]",
" Locality Name: Boston",
" Organization Name: Twisted Matrix Labs",
" Organizational Unit Name: Security",
" State Or Province Name: Massachusetts",
"",
"Issuer:",
" Common Name: example.twistedmatrix.com",
" Country Name: US",
" Email Address: [email protected]",
" Locality Name: Boston",
" Organization Name: Twisted Matrix Labs",
" Organizational Unit Name: Security",
" State Or Province Name: Massachusetts",
"",
"Serial Number: 12345",
"Digest: C4:96:11:00:30:C3:EC:EE:A3:55:AA:ED:8C:84:85:18",
"Public Key with Hash: ff33994c80812aa95a79cdb85362d054"])
def test_certificateOptionsSerialization(self):
"""
Test that __setstate__(__getstate__()) round-trips properly.
"""
firstOpts = sslverify.OpenSSLCertificateOptions(
privateKey=self.sKey,
certificate=self.sCert,
method=SSL.SSLv3_METHOD,
verify=True,
caCerts=[self.sCert],
verifyDepth=2,
requireCertificate=False,
verifyOnce=False,
enableSingleUseKeys=False,
enableSessions=False,
fixBrokenPeers=True,
enableSessionTickets=True)
context = firstOpts.getContext()
state = firstOpts.__getstate__()
# The context shouldn't be in the state to serialize
self.failIf(objgrep(state, context, isSame),
objgrep(state, context, isSame))
opts = sslverify.OpenSSLCertificateOptions()
opts.__setstate__(state)
self.assertEqual(opts.privateKey, self.sKey)
self.assertEqual(opts.certificate, self.sCert)
self.assertEqual(opts.method, SSL.SSLv3_METHOD)
self.assertEqual(opts.verify, True)
self.assertEqual(opts.caCerts, [self.sCert])
self.assertEqual(opts.verifyDepth, 2)
self.assertEqual(opts.requireCertificate, False)
self.assertEqual(opts.verifyOnce, False)
self.assertEqual(opts.enableSingleUseKeys, False)
self.assertEqual(opts.enableSessions, False)
self.assertEqual(opts.fixBrokenPeers, True)
self.assertEqual(opts.enableSessionTickets, True)
def test_certificateOptionsSessionTickets(self):
"""
Enabling session tickets should not set the OP_NO_TICKET option.
"""
opts = sslverify.OpenSSLCertificateOptions(enableSessionTickets=True)
ctx = opts.getContext()
self.assertEqual(0, ctx.set_options(0) & 0x00004000)
def test_certificateOptionsSessionTicketsDisabled(self):
"""
Enabling session tickets should set the OP_NO_TICKET option.
"""
opts = sslverify.OpenSSLCertificateOptions(enableSessionTickets=False)
ctx = opts.getContext()
self.assertEqual(0x00004000, ctx.set_options(0) & 0x00004000)
def test_allowedAnonymousClientConnection(self):
"""
Check that anonymous connections are allowed when certificates aren't
required on the server.
"""
onData = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, requireCertificate=False),
sslverify.OpenSSLCertificateOptions(
requireCertificate=False),
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
def test_refusedAnonymousClientConnection(self):
"""
Check that anonymous connections are refused when certificates are
required on the server.
"""
onServerLost = defer.Deferred()
onClientLost = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=True,
caCerts=[self.sCert], requireCertificate=True),
sslverify.OpenSSLCertificateOptions(
requireCertificate=False),
onServerLost=onServerLost,
onClientLost=onClientLost)
d = defer.DeferredList([onClientLost, onServerLost],
consumeErrors=True)
def afterLost(((cSuccess, cResult), (sSuccess, sResult))):
self.failIf(cSuccess)
self.failIf(sSuccess)
# Win32 fails to report the SSL Error, and report a connection lost
# instead: there is a race condition so that's not totally
# surprising (see ticket #2877 in the tracker)
self.assertIsInstance(cResult.value, (SSL.Error, ConnectionLost))
self.assertIsInstance(sResult.value, SSL.Error)
return d.addCallback(afterLost)
def test_failedCertificateVerification(self):
"""
Check that connecting with a certificate not accepted by the server CA
fails.
"""
onServerLost = defer.Deferred()
onClientLost = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=False,
requireCertificate=False),
sslverify.OpenSSLCertificateOptions(verify=True,
requireCertificate=False, caCerts=[self.cCert]),
onServerLost=onServerLost,
onClientLost=onClientLost)
d = defer.DeferredList([onClientLost, onServerLost],
consumeErrors=True)
def afterLost(((cSuccess, cResult), (sSuccess, sResult))):
self.failIf(cSuccess)
self.failIf(sSuccess)
return d.addCallback(afterLost)
def test_successfulCertificateVerification(self):
"""
Test a successful connection with client certificate validation on
server side.
"""
onData = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=False,
requireCertificate=False),
sslverify.OpenSSLCertificateOptions(verify=True,
requireCertificate=True, caCerts=[self.sCert]),
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
def test_successfulSymmetricSelfSignedCertificateVerification(self):
"""
Test a successful connection with validation on both server and client
sides.
"""
onData = defer.Deferred()
self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,
certificate=self.sCert, verify=True,
requireCertificate=True, caCerts=[self.cCert]),
sslverify.OpenSSLCertificateOptions(privateKey=self.cKey,
certificate=self.cCert, verify=True,
requireCertificate=True, caCerts=[self.sCert]),
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
def test_verification(self):
"""
Check certificates verification building custom certificates data.
"""
clientDN = sslverify.DistinguishedName(commonName='client')
clientKey = sslverify.KeyPair.generate()
clientCertReq = clientKey.certificateRequest(clientDN)
serverDN = sslverify.DistinguishedName(commonName='server')
serverKey = sslverify.KeyPair.generate()
serverCertReq = serverKey.certificateRequest(serverDN)
clientSelfCertReq = clientKey.certificateRequest(clientDN)
clientSelfCertData = clientKey.signCertificateRequest(
clientDN, clientSelfCertReq, lambda dn: True, 132)
clientSelfCert = clientKey.newCertificate(clientSelfCertData)
serverSelfCertReq = serverKey.certificateRequest(serverDN)
serverSelfCertData = serverKey.signCertificateRequest(
serverDN, serverSelfCertReq, lambda dn: True, 516)
serverSelfCert = serverKey.newCertificate(serverSelfCertData)
clientCertData = serverKey.signCertificateRequest(
serverDN, clientCertReq, lambda dn: True, 7)
clientCert = clientKey.newCertificate(clientCertData)
serverCertData = clientKey.signCertificateRequest(
clientDN, serverCertReq, lambda dn: True, 42)
serverCert = serverKey.newCertificate(serverCertData)
onData = defer.Deferred()
serverOpts = serverCert.options(serverSelfCert)
clientOpts = clientCert.options(clientSelfCert)
self.loopback(serverOpts,
clientOpts,
onData=onData)
return onData.addCallback(
lambda result: self.assertEqual(result, WritingProtocol.byte))
if interfaces.IReactorSSL(reactor, None) is None:
OpenSSLOptions.skip = "Reactor does not support SSL, cannot run SSL tests"
class _NotSSLTransport:
def getHandle(self):
return self
class _MaybeSSLTransport:
def getHandle(self):
return self
def get_peer_certificate(self):
return None
def get_host_certificate(self):
return None
class _ActualSSLTransport:
def getHandle(self):
return self
def get_host_certificate(self):
return sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM).original
def get_peer_certificate(self):
return sslverify.Certificate.loadPEM(A_PEER_CERTIFICATE_PEM).original
class Constructors(unittest.TestCase):
def test_peerFromNonSSLTransport(self):
"""
Verify that peerFromTransport raises an exception if the transport
passed is not actually an SSL transport.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.peerFromTransport,
_NotSSLTransport())
self.failUnless(str(x).startswith("non-TLS"))
def test_peerFromBlankSSLTransport(self):
"""
Verify that peerFromTransport raises an exception if the transport
passed is an SSL transport, but doesn't have a peer certificate.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.peerFromTransport,
_MaybeSSLTransport())
self.failUnless(str(x).startswith("TLS"))
def test_hostFromNonSSLTransport(self):
"""
Verify that hostFromTransport raises an exception if the transport
passed is not actually an SSL transport.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.hostFromTransport,
_NotSSLTransport())
self.failUnless(str(x).startswith("non-TLS"))
def test_hostFromBlankSSLTransport(self):
"""
Verify that hostFromTransport raises an exception if the transport
passed is an SSL transport, but doesn't have a host certificate.
"""
x = self.assertRaises(CertificateError,
sslverify.Certificate.hostFromTransport,
_MaybeSSLTransport())
self.failUnless(str(x).startswith("TLS"))
def test_hostFromSSLTransport(self):
"""
Verify that hostFromTransport successfully creates the correct
certificate if passed a valid SSL transport.
"""
self.assertEqual(
sslverify.Certificate.hostFromTransport(
_ActualSSLTransport()).serialNumber(),
12345)
def test_peerFromSSLTransport(self):
"""
Verify that peerFromTransport successfully creates the correct
certificate if passed a valid SSL transport.
"""
self.assertEqual(
sslverify.Certificate.peerFromTransport(
_ActualSSLTransport()).serialNumber(),
12346)
if interfaces.IReactorSSL(reactor, None) is None:
Constructors.skip = "Reactor does not support SSL, cannot run SSL tests"
| gpl-2.0 |
hcseob/py_spectre | py_spectre/psf.py | 1 | 50756 | # -*- coding: latin-1 -*-
"""
Copyright (c) 2008 Pycircuit Development Team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of the Pycircuit nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import unittest
import struct, os, re
import operator
import numpy
# import psfasc
from copy import copy
from struct import unpack, pack
class PSFInvalid(Exception):
pass
def warning(msg):
print "Warning: " + msg
def indent(str, n=2):
return "\n".join([' '*n+s for s in str.split("\n")])
class PSFData(object):
@classmethod
def fromFile(cls, file):
obj = cls()
obj.deSerializeFile(file)
return obj
size=None
def __init__(self, value=None, extarg=None):
self.value = value
self.extarg = extarg
def setValue(self, value):
self.value = value
def __eq__(self, a):
return self.value == a
def __cmp__(self, a):
return cmp(self.value, a)
def __hash__(self):
return hash(self.value)
def deSerializeFile(self, file):
pass
def getSize(self):
return self.size
def getValue(self):
return self.value
def __str__(self):
return str(self.value)
def toPSFasc(self, prec=None):
return str(self)
def __repr__(self):
return self.value.__repr__()
class PSFNumber(PSFData):
def __int__(self):
return self.value
def __add__(self, a):
return UInt32(self.value+int(a))
def __mul__(self, a):
return UInt32(self.value*int(a))
def __radd__(self, a):
return UInt32(self.value+int(a))
def __sub__(self, a):
return UInt32(self.value-int(a))
def __rsub__(self, a):
return UInt32(int(a)-self.value)
def __div__(self, a):
return UInt32(self.value/int(a))
def __rdiv__(self, a):
return UInt32(int(a)/self.value)
def __floordiv__(self, a):
return UInt32(self.value//int(a))
def __rfloordiv__(self, a):
return UInt32(int(a)//self.value)
def __mod__(self, a):
return UInt32(self.value%int(a))
class Int8(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
data=file.read(self.size)
self.value = unpack("b",data[3])[0]
class UInt8(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
data=file.read(self.size)
self.value = unpack("B",data[3])[0]
class Int32(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
self.value = unpack(">i",file.read(self.size))[0]
class UInt32(PSFNumber):
size=4
def deSerializeFile(self, file, size=None):
self.value = unpack(">I",file.read(self.size))[0]
class Int64(PSFNumber):
size=8
def __int__(self):
return self.value
def deSerializeFile(self, file, size=None):
self.value = unpack(">q",file.read(self.size))[0]
class UInt64(PSFNumber):
size=8
def __int__(self):
return self.value
def deSerializeFile(self, file, size=None):
self.value = unpack(">Q",file.read(self.size))[0]
class Float64(PSFNumber):
size=8
def __float__(self):
return float(self.value)
def toPSFasc(self, prec=6):
if prec:
fmt=('%%#%dg'%prec)
else:
fmt='%#g'
return fmt%self.value
def deSerializeFile(self, file, size=None):
self.value = unpack(">d",file.read(self.size))[0]
class Float32(PSFNumber):
size=4
def __float__(self):
return float(self.value)
def deSerializeFile(self, file, size=None):
self.value = unpack(">f",file.read(self.size))[0]
class ComplexFloat64(PSFNumber):
size=16
def toPSFasc(self, prec=6):
if prec:
fmt=('%%#%dg'%prec)
else:
fmt='%#g'
return "(" + fmt%self.value.real + " " + fmt%self.value.imag + ")"
def deSerializeFile(self, file, size=None):
re,im = unpack(">dd",file.read(self.size))
self.value = complex(re,im)
class String(PSFData):
def __str__(self):
return self.value
def deSerializeFile(self, file, size=None):
self.len = unpack(">I",file.read(4))[0]
if self.len < 0x100:
self.value = file.read(self.len)
# Pad to 32-bit boundary
file.read((4-self.len)%4)
else:
raise Exception("String too long %d"%self.len)
def toPSFasc(self, prec=None):
return "\""+str(self.value)+"\""
class Struct(PSFData):
def __init__(self, structdef, value=None):
self.structdef = structdef
self.value = {}
if value:
self.setValue(value)
def __getitem__(self, key):
return self.value[key]
def getValue(self):
return dict([(k,v.getValue()) for k,v in self.value.items()])
def setValue(self, value):
assert(value != None and len(value) == len(self.structdef.children))
for element, val in zip(self.structdef.children, value):
valueobj = element.getDataObj()
valueobj.setValue(val)
self.value[element.name] = valueobj
def deSerializeFile(self, file):
for element in self.structdef.children:
value = element.getDataObj()
value.deSerializeFile(file)
self.value[element.name] = value
def toPSFasc(self, prec=None):
s="(\n"
for element in self.structdef.children:
s+=self.value[element.name].toPSFasc(prec)+"\n"
s+=")"
return s
def __repr__(self):
return "\n".join([indent(s) for s in map(repr,self.value.items())]) + "\n"
class Array(PSFData):
def setValue(self, value):
dataclass, length = self.extarg
if value != None:
self.children = [dataclass(value=val) for val in value]
else:
self.children = [dataclass(value=None) for val in range(length)]
def getValue(self):
return [v.getValue() for v in self.children]
def __iter__(self):
return self.children.__iter__()
def __tuple__(self):
return tuple(self.children)
def __repr__(self):
return "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
class Chunk:
"""Base class for chunk"""
def __init__(self, psf=None, type=None):
self.psf = psf
self.fileoffset=None
if not hasattr(self.__class__, 'type'):
self.type = type
self.verbose = False
self.name = ""
def deSerializeFile(self, file):
self.fileoffset = file.tell()
type = UInt32.fromFile(file)
if (self.type != None) and self.type != type:
file.seek(-UInt32.size, 1)
raise IncorrectChunk(type, self.type)
def __repr__(self):
return self.__class__.__name__
class NextSectionType(Chunk):
type=1
class NextSectionSweep(Chunk):
type=2
class NextSectionTrace(Chunk):
type=3
class NextSectionValues(Chunk):
type=4
class EndOfStructDef(Chunk):
type=18
NextSectionClasses = [NextSectionType, NextSectionSweep, NextSectionTrace, NextSectionValues]
class Property(Chunk):
type=None
valueclass=None
def __init__(self, name=None, value=None):
Chunk.__init__(self)
self.name = String(name)
self.value = self.valueclass(value)
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.name = String.fromFile(file)
self.value = self.valueclass.fromFile(file)
def toPSFasc(self, prec=9):
return self.name.toPSFasc() + " " + self.value.toPSFasc(prec=prec)
def __repr__(self):
return self.__class__.__name__+"("+str(self.name)+","+str(self.value)+")"
class PropertyString(Property):
type=33
valueclass=String
class PropertyUInt(Property):
type=34
valueclass=UInt32
class PropertyFloat64(Property):
type=35
valueclass=Float64
PropertyClasses = [PropertyString, PropertyUInt, PropertyFloat64]
TYPEFLOATDOUBLE = 11
TYPEINTBYTE = 1
TYPECOMPLEXDOUBLE = 12
TYPESTRUCT = 16
TYPESTRING = 2 ## Incorrect number
TYPEARRAY = 3 ## Incorrect number
TYPEINTLONG = 5
class DataTypeDef(Chunk):
"""Class representing data type of waveform data"""
type=16
ClassDict = {
TYPEFLOATDOUBLE: Float64,
TYPEINTBYTE: Int8,
TYPECOMPLEXDOUBLE: ComplexFloat64,
TYPESTRING: String,
TYPEARRAY: Array,
TYPEINTLONG: Int32
}
PSFASCDict = {
TYPEFLOATDOUBLE: "FLOAT DOUBLE",
TYPEINTBYTE: "INT BYTE",
TYPECOMPLEXDOUBLE: "COMPLEX DOUBLE",
TYPESTRING: "STRING *",
TYPEINTLONG: "INT LONG"
}
def __init__(self, psf, id=0, name=None, datatypeid=0, structdef=None):
Chunk.__init__(self, psf, type)
self.id = id
self.name = name
self.datatypeid = datatypeid
self.structdef = structdef
self.properties = []
def getDataObj(self):
"""Get a data object described by the DataType"""
if self.datatypeid == TYPESTRUCT:
return self.structdef.getDataObj()
elif self.datatypeid == TYPEARRAY:
return Array(extarg=(self.ClassDict[self.structdef[0]], self.structdef[1]))
else:
return self.ClassDict[self.datatypeid](extarg=self.structdef)
def toPSFasc(self, prec=None):
r=self.name.toPSFasc(prec) + " "
if self.datatypeid == TYPESTRUCT:
r+=self.structdef.toPSFasc(prec)
elif self.datatypeid == TYPEARRAY:
r+="ARRAY ( %s ) "%str(self.structdef[1])+self.PSFASCDict[self.structdef[0]]
else:
r+= self.PSFASCDict[self.datatypeid]
if len(self.properties)>0:
r+=" PROP(\n"
r+="\n".join([prop.toPSFasc(prec) for prop in self.properties])
r+="\n)"
return r
def getDataSize(self):
if self.datatypeid == TYPESTRUCT:
return self.structdef.getDataSize()
else:
return self.ClassDict[self.datatypeid].size
def deSerializeFile(self, file):
start = file.tell()
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
arraytype = UInt32.fromFile(file)
self.datatypeid = UInt32.fromFile(file)
if arraytype != 0:
self.datatypeid, self.structdef = TYPEARRAY, (UInt32.fromFile(file), self.datatypeid)
if self.datatypeid == 16:
self.structdef = StructDef.fromFile(file, self.psf)
# Read possible property objects that belongs to the type by peeking ahead
while True:
oldpos = file.tell()
try:
prop = readChunk(self.psf, file, expectedclasses=PropertyClasses)
self.properties.append(prop)
except ValueError:
file.seek(oldpos)
break
def __repr__(self):
return self.__class__.__name__+"("+str({"name":self.name,"id":"0x%x"%self.id, "datatypeid":self.datatypeid,
"properties":self.properties})+")"
class DataTypeRef(Chunk):
type=16
"""Class representing link to data type"""
def __init__(self, psf, type=None):
Chunk.__init__(self, psf, type)
self.id = None
self.name = None
self.datatypeid = 0
self.properties = []
def getDataObj(self):
"""Get a data object described by the DataType"""
return self.psf.types.idMap[self.datatypeid].getDataObj()
def toPSFasc(self, prec=None):
r=self.name.toPSFasc(prec) + " "
r+=self.psf.types.idMap[self.datatypeid].name.toPSFasc()
if len(self.properties)>0:
r+=" PROP(\n"
r+="\n".join([prop.toPSFasc(prec) for prop in self.properties])
r+="\n)"
return r
def getDataSize(self):
return self.psf.types.idMap[self.datatypeid].getDataSize()
def deSerializeFile(self, file):
start = file.tell()
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
self.datatypeid = UInt32.fromFile(file)
assert(self.datatypeid != 0)
# Read possible property objects that belongs to the type by peeking ahead
while True:
oldpos = file.tell()
try:
prop = readChunk(self.psf, file, expectedclasses=PropertyClasses)
self.properties.append(prop)
except ValueError:
file.seek(oldpos)
break
def __repr__(self):
return self.__class__.__name__+"("+str({"name":self.name,"id":"0x%x"%self.id, "datatypeid":self.datatypeid,
"properties":self.properties})+")"
class StructDef(PSFData):
"""Class representing struct definition"""
@classmethod
def fromFile(cls, file, psf):
obj = cls()
obj.deSerializeFile(file, psf)
return obj
def __init__(self):
self.children = []
def getDataObj(self):
return Struct(self)
def getDataSize(self):
return sum([child.getDataSize() for child in self.children])
def toPSFasc(self, prec=None):
s="STRUCT(\n"
for child in self.children:
s+=child.toPSFasc(prec)+"\n"
s+=")"
return s
def deSerializeFile(self, file, psf):
while True:
chunk = readChunk(psf, file, expectedclasses=[DataTypeDef, EndOfStructDef])
if isinstance(chunk, EndOfStructDef):
break
else:
self.children.append(chunk)
def __repr__(self):
return self.__class__.__name__ + "(\n"+\
"\n".join(map(str,self.children))+\
")\n"
class SimpleContainer(Chunk):
type = 21
def __init__(self, psf, type=None, childrenclslist=None, childrenclsignore=None):
Chunk.__init__(self, psf, type)
self.section = None
self.children = []
self.childrenclslist = childrenclslist
self.childrenclsignore = childrenclsignore
self.endpos = None
def getChunks(self):
return self.children
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value
self.children = []
while file.tell() < self.endpos:
chunk = readChunk(self.psf, file, expectedclasses=self.childrenclslist+self.childrenclsignore)
if chunk.__class__ in self.childrenclslist:
self.children.append(chunk)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def __repr__(self):
s=""
if self.fileoffset:
s+= "0x%x"%self.fileoffset+ ":"
s+= self.__class__.__name__ + "(" + str(self.type) +")"
if self.endpos and self.fileoffset:
s+= "size="+str(self.endpos-self.fileoffset)
s+= "\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
return s
class Container22(Chunk):
type=22
def __init__(self, psf, type=None, n=None, childrenclslist=None):
Chunk.__init__(self, psf, 22)
self.section = None
self.children = []
self.childrenclslist = childrenclslist
self.endpos = None
def getChunks(self):
return self.children
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value # Save end position of Container
self.children = []
while file.tell() < self.endpos:
chunk = readChunk(self.psf, file,
expectedclasses=self.childrenclslist)
self.children.append(chunk)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def __repr__(self):
return "0x%x"%self.fileoffset +":" + self.__class__.__name__ +\
"(" + str(self.type) +")" + "\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
class ZeroPad(Chunk):
type = 20
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
size = UInt32.fromFile(file).value
self.endpos = file.tell() + size
file.seek(self.endpos)
class HashTable(Chunk):
type = 19
"""Class representing offset of trace data"""
def __init__(self, psf, n=None):
Chunk.__init__(self, psf, type)
self.children = []
self.extra=[]
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
startpos = file.tell()
size = UInt32.fromFile(file)
for i in range(0, size/8):
id = UInt32.fromFile(file)
offset = UInt32.fromFile(file)
self.children.append((id, offset))
def __repr__(self):
return self.__class__.__name__+"\n"+ "\n".join([" 0x%x: 0x%x"%(k,v.value) for k,v in self.children])+")"
class HashTableTrace(Chunk):
type = 19
"""Class representing offset of trace data"""
def __init__(self, psf):
Chunk.__init__(self, psf, type)
self.children = []
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.size = UInt32.fromFile(file)
for i in range(0, self.size.value/16):
id = UInt32.fromFile(file)
offset = UInt32.fromFile(file)
data1 = UInt32.fromFile(file).value
data2 = UInt32.fromFile(file).value
self.children.append((id,offset,data1,data2))
def __repr__(self):
return self.__class__.__name__+"\n"+ "\n".join([" %s: 0x%x 0x%x 0x%x"%(pack(">I",k.value),v.value,d1,d2) for k,v,d1,d2 in self.children])+")"
class HashContainer(Chunk):
type=21
hashclass = HashTable
def __init__(self, psf, childrenclslist=None, childrenclsignore=None):
Chunk.__init__(self, psf, type)
self.section = None
self.children = []
self.childrenclslist = childrenclslist
self.childrenclsignore = childrenclsignore
self.endpos = None
self.hashtable = None
def __len__(self):
return len(self.children)
def getChunks(self):
return self.children
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value
self.children = []
self.data = Container22(self.psf,
childrenclslist=self.childrenclslist)
self.data.deSerializeFile(file)
self.hashtable = self.hashclass(self.psf)
self.hashtable.deSerializeFile(file)
# Copy children reference from data
self.children = self.data.children
self.section = UInt32.fromFile(file)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def __repr__(self):
s=""
if self.fileoffset:
s += "0x%x"%self.fileoffset +":"
s += self.__class__.__name__ + "(" + str(self.type) +")"
if self.endpos:
s+=" size="+str(self.endpos-self.fileoffset) + "\n"
s += "\n".join([indent(s) for s in map(str,(self.children, self.hashtable))]) + "\n"
return s
class HeaderSection(SimpleContainer):
type=21
def __init__(self, psf, n=None):
SimpleContainer.__init__(self,psf, childrenclslist=PropertyClasses,
childrenclsignore=NextSectionClasses)
self.properties = {}
def addProperty(self, prop):
"""Add property to header"""
self.children.append(prop)
self.properties[prop.name] = prop.value
def deSerializeFile(self, file):
SimpleContainer.deSerializeFile(self, file)
# Read header properties
self.properties = {}
for prop in self.children:
self.properties[prop.name] = prop.value
def toPSFasc(self, prec=None):
r="HEADER\n"
r+='"PSFversion" "1.00"\n'
r+="\n".join([child.toPSFasc(prec) for child in self.children \
if not child.name.value[0:3].upper() == 'PSF'])
return r
class SweepSection(SimpleContainer):
type=21
def __init__(self, psf):
SimpleContainer.__init__(self, psf, childrenclslist=[DataTypeRef],
childrenclsignore=NextSectionClasses)
def deSerializeFile(self, file):
SimpleContainer.deSerializeFile(self, file)
# Read header properties
self.idMap = {}
for chunk in self.children:
self.idMap[chunk.id] = chunk
def getSweep(self, id):
return self.idMap[id]
def getNames(self):
return tuple([str(child.name) for child in self.children])
def toPSFasc(self, prec=None):
r="SWEEP\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class TypeSection(HashContainer):
def __init__(self, psf):
HashContainer.__init__(self, psf, childrenclslist=[DataTypeDef],
childrenclsignore=NextSectionClasses)
self.idMap = {}
self.nameMap = {}
def addType(self, type):
type.id = self.psf.allocId()
self.children.append(type)
self.idMap[type.id] = type
self.nameMap[type.name] = type
def getType(self, id):
return self.idMap[id]
def getTypeByName(self, name):
return self.nameMap[name]
def deSerializeFile(self, file):
HashContainer.deSerializeFile(self, file)
# Read header properties
self.idMap = {}
for chunk in self.children:
self.idMap[chunk.id] = chunk
self.nameMap[chunk.name] = type
def toPSFasc(self, prec=None):
r="TYPE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class TraceSection(HashContainer):
hashclass = HashTableTrace
def __init__(self, psf):
HashContainer.__init__(self, psf, childrenclslist=[GroupDef, DataTypeRef])
self.idMap = {}
self.nameIndex = {}
def deSerializeFile(self, file):
HashContainer.deSerializeFile(self, file)
self.idMap = {}
for index, chunk in enumerate(self.children):
self.idMap[chunk.id] = chunk
if isinstance(chunk, GroupDef):
self.nameIndex.update(dict([(par, (index,)+value) for par,value in chunk.getNameIndex().items()]))
else:
self.nameIndex[chunk.name] = (index,)
def getNameIndex(self):
return self.nameIndex
def toPSFasc(self, prec=None):
r="TRACE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
def getTraceNames(self):
result = []
for trace in self.children:
if isinstance(trace,GroupDef):
result += trace.getNames()
else:
result.append(trace.name)
return tuple(map(str, result))
def getTraceIndexByName(self, name):
"""Returns an index to the given trace name
The index is hierarchical, so if the traces are divided into 2 groups
the index (0, 1) means child 1 of group 0.
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.traces.getTraceIndexByName("VIN")
(0, 1)
>>> psf=PSFReader('./test/resultdirs/parsweep2/C=1e-12,R=1e-12/psf/ac.ac')
>>> psf.open()
>>> psf.traces.getTraceIndexByName("net3")
(0,)
"""
return self.nameIndex[name]
class ValuesSectionNonSweep(HashContainer):
type=21
def __init__(self, psf):
HashContainer.__init__(self, psf, childrenclslist=[NonSweepValue])
self.idMap={}
self.nameMap={}
def addValue(self, value):
value.id = self.psf.allocId()
if not isinstance(value, NonSweepValue):
raise ValueError("Value should be a NonSweepValue")
self.idMap[value.id] = value
self.nameMap[value.name] = value
self.children.append(value)
def deSerializeFile(self, file):
HashContainer.deSerializeFile(self, file)
for child in self.children:
self.nameMap[child.name] = child
def getValuePropertiesByName(self, name):
return dict([(prop.name, prop.value) for prop in self.nameMap[name].properties])
def getValueByName(self, name):
return self.nameMap[name].getValue()
def getValueNames(self):
return tuple([child.name for child in self.children])
def toPSFasc(self, prec=None):
r="VALUE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class ValuesSectionSweep(SimpleContainer):
type=21
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.endpos = UInt32.fromFile(file).value
windowedsweep = self.psf.header.properties.has_key('PSF window size')
if windowedsweep:
el = ZeroPad(self.psf)
el.deSerializeFile(file)
isweep=0
while isweep < self.psf.header.properties['PSF sweep points']:
if windowedsweep:
value = SweepValueWindowed(self.psf)
else:
value = SweepValueSimple(self.psf)
isweep += value.deSerializeFile(file, n=self.psf.header.properties['PSF sweep points']-isweep)
self.children.append(value)
self.section = UInt32.fromFile(file)
# Read trailing bytes
if self.endpos-file.tell() != 0:
warning("%d trailing bytes in %s"%(self.endpos-file.tell(), self.__class__.__name__))
self.tail = file.read(self.endpos-file.tell())
file.seek(self.endpos)
def getSweepParamValues(self):
return reduce(operator.__add__, [child.getSweepParamValues() for child in self.children])
def getValueNames(self):
return self.psf.traces.getTraceNames()
def __len__(self):
return len(self.psf.traces)
def getValueByName(self, name):
windowedsweep = self.psf.header.properties.has_key('PSF window size')
index = self.psf.traces.getTraceIndexByName(name)
result = []
for child in self.children:
obj=child
for i in index:
obj = obj.children[i]
# If windowed sweep, each child will be a list of values in the window
if windowedsweep:
result += [v.getValue() for v in obj]
else:
result.append(obj.getValue())
return numpy.array(result)
def toPSFasc(self, prec=None):
r="VALUE\n"
r+="\n".join([child.toPSFasc(prec) for child in self.children])
return r
class NonSweepValue(Chunk):
type=16
def __init__(self, psf, id=None, typeid=None, name=None, value=None):
Chunk.__init__(self, psf, type)
self.id = id
self.name = name
self.typeid = typeid
if typeid:
self.valuetype = self.psf.types.idMap[self.typeid]
else:
self.valuetype = None
if value:
self.value = value
elif self.valuetype:
self.value = self.valuetype.getDataObj()
else:
self.value = None
self.properties = []
def getValue(self):
return self.value.getValue()
def setValue(self, value):
self.value.setValue(value)
def deSerializeFile(self, file):
startpos = file.tell()
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
self.typeid = UInt32.fromFile(file)
assert(self.typeid != 0)
self.valuetype = self.psf.types.idMap[self.typeid]
self.value = self.valuetype.getDataObj()
self.value.deSerializeFile(file)
# Read possible property objects that belongs to the type by peeking ahead
while True:
oldpos = file.tell()
try:
prop = readChunk(self.psf, file, expectedclasses=PropertyClasses)
self.properties.append(prop)
except ValueError:
file.seek(oldpos)
break
def toPSFasc(self, prec=None):
r = self.name.toPSFasc(prec) + " " + self.valuetype.name.toPSFasc(prec) + " " + self.value.toPSFasc(prec)
if len(self.properties)>0:
r+=" PROP(\n"
r+="\n".join([prop.toPSFasc(prec) for prop in self.properties])
r+="\n)"
return r
def __repr__(self):
return self.__class__.__name__+"("+str({"name":self.name, "id":"0x%x"%self.id, "typeid":"0x%x"%self.typeid,
"properties":self.properties,"value":self.value})+")"
class SweepValue(Chunk):
"""Class representing waveform data"""
type = 16
def __init__(self, psf, type=None):
Chunk.__init__(self, psf, type)
self.id = None
self.linktypeid = UInt32()
self.datatypeid = UInt32()
self.paramtype = None
self.paramvalue = None
self.children = []
self.properties = []
def deSerializeFile(self, file, n=None):
pass
def getSweepParamValues(self):
pass
def __len__(self):
return len(self.children)
def __repr__(self):
return self.__class__.__name__ + "(" + str(self.paramtype.name) + "=" + str(self.paramvalue) +","+ \
"children="+str(self.children) +")\n"
class SweepValueSimple(SweepValue):
def deSerializeFile(self, file, n=None):
Chunk.deSerializeFile(self, file)
self.paramtypeid = UInt32.fromFile(file)
self.paramtype = self.psf.sweeps.getSweep(self.paramtypeid)
self.paramvalue = self.paramtype.getDataObj()
self.paramvalue.deSerializeFile(file)
for datatype in self.psf.traces.children:
datatypeid = UInt32.fromFile(file)
if datatypeid in (17,16):
valuetypeid = UInt32.fromFile(file)
if valuetypeid != datatype.id:
## Unexpected value type id found
## This is probably because of missing trace values
## Undo the read of datatypeid and valuetypeid, then break out of the loop
file.seek(-2*UInt32.size, 1)
break
value = datatype.getDataObj()
value.deSerializeFile(file)
self.children.append(value)
elif datatypeid == 15:
## End of section
file.seek(-UInt32.size, 1)
break
else:
raise Exception("Datatypeid unknown 0x%x" % datatypeid)
return 1
def getSweepParamValues(self):
return [self.paramvalue.getValue()]
def toPSFasc(self, prec=None):
r=self.paramtype.name.toPSFasc(prec) + " " +self.paramvalue.toPSFasc(prec)+"\n"
r+="\n".join([valuetype.name.toPSFasc(prec) + " " + value.toPSFasc(prec) \
for valuetype, value in zip(self.psf.traces.children, self.children)])
return r
class SweepValueWindowed(SweepValue):
def deSerializeFile(self, file, n=None):
bufferstart = file.tell()
Chunk.deSerializeFile(self, file)
self.paramtypeid = UInt32.fromFile(file)
assert(len(self.psf.sweeps.children) == 1)
self.paramtype=self.psf.sweeps.children[0]
self.paramvalue = []
# Get sweep parameter values
paramvaluesize = self.paramtype.getDataSize()
windowsize = self.psf.header.properties['PSF window size'].value
leftinwindow = (file.tell()//windowsize + 1)*windowsize - file.tell()
windowlen = leftinwindow//paramvaluesize
if n > windowlen:
n = windowlen
for j in xrange(n):
paramvalue = self.paramtype.getDataObj()
paramvalue.deSerializeFile(file)
if j < n:
self.paramvalue.append(paramvalue)
# Get trace values
for trace in self.psf.traces.children:
value = trace.getDataObj()
value.deSerializeFile(file, count=n,
windowsize=self.psf.header.properties['PSF window size'].value)
self.children.append(value)
# Skip trailing padding bytes
padsize = int((self.psf.header.properties['PSF buffer size'] - (file.tell()-bufferstart))% \
self.psf.header.properties['PSF buffer size'])
file.seek(padsize, 1)
return n
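    # A hypothetical worked example of the window arithmetic above (values
    # assumed, not taken from a real file): with 'PSF window size' == 4096,
    # a Float64 sweep parameter (paramvaluesize == 8) and file.tell() == 8192
    # on entry, leftinwindow == (8192//4096 + 1)*4096 - 8192 == 4096, so at
    # most windowlen == 4096//8 == 512 parameter values fit in this window.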
def getSweepParamValues(self):
return [v.getValue() for v in self.paramvalue]
def toPSFasc(self, prec=None):
r=''
for i, paramvalue in enumerate(self.paramvalue):
r+=self.paramtype.name.toPSFasc(prec) + " " + paramvalue.toPSFasc(prec) + "\n"
r+="\n".join([trace.name.toPSFasc(prec) + " " + value.toPSFasc(prec=prec, index=i) \
for trace,value in zip(self.psf.traces.children, self.children)])
if i < len(self.paramvalue)-1:
r+="\n"
return r
class GroupData(PSFData):
def __init__(self, groupdef):
PSFData.__init__(self)
self.groupdef = groupdef
self.children = []
def deSerializeFile(self, file, count=None, windowsize=None):
for element in self.groupdef.children:
            if count is None:
value = element.getDataObj()
value.deSerializeFile(file)
self.children.append(value)
else:
valuearray=[]
# If a window is used in the PSF file, the entire window is stored
# and the data is aligned to the end of the window. So we need
# to skip window size - data size
file.seek(int(windowsize - count*element.getDataSize()), 1)
for i in xrange(0,count):
value = element.getDataObj()
value.deSerializeFile(file)
valuearray.append(value)
self.children.append(valuearray)
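    # Illustration of the alignment skip above (numbers assumed for the
    # example): with windowsize == 4096 and count == 100 Float64 elements
    # (getDataSize() == 8), the values occupy the last 800 bytes of the
    # window, so the seek skips 4096 - 100*8 == 3296 padding bytes first.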
def toPSFasc(self, prec=None, index=None):
        if index is not None:
return "\n".join([v[index].toPSFasc(prec) for v in self.children])
else:
return "\n".join([v.toPSFasc(prec) for v in self.children])
def getSize(self):
return self.groupdef.getDataSize()
def __repr__(self):
return "GroupData" + "\n" + "\n".join([indent(s) for s in map(repr,self.children)]) + "\n"
class GroupDef(Chunk):
    """Class representing a group of traces"""
    type = 17
def __init__(self, psf):
Chunk.__init__(self, psf)
self.children=[]
self.datasize=None
def getDataObj(self):
return GroupData(self)
def deSerializeFile(self, file):
Chunk.deSerializeFile(self, file)
self.id = UInt32.fromFile(file)
self.name = String.fromFile(file)
self.nchildren = UInt32.fromFile(file)
# Read children
self.children = []
self.datasize = 0
for i in range(0, self.nchildren):
child = DataTypeRef(self.psf)
child.deSerializeFile(file)
self.children.append(child)
self.datasize += child.getDataSize()
def getNameIndex(self):
return dict([(v.name, (i,)) for i,v in enumerate(self.children)])
def toPSFasc(self, prec=None):
s=self.name.toPSFasc(prec) + " GROUP %d\n"%len(self.children)
s+="\n".join([child.toPSFasc(prec) for child in self.children])
return s
def getDataSize(self):
return self.datasize
def getNames(self):
return [str(child.name) for child in self.children]
def __repr__(self):
return "0x%x"%self.fileoffset +":" + self.__class__.__name__+ "(id=0x%x"%self.id+", nchildren=%d"%self.nchildren+")\n" + "\n".join([indent(s) for s in map(str,self.children)]) + "\n"
class UnknownChunk(Exception):
def __init__(self, chunktype):
self.type = chunktype
def __str__(self):
return "Unknown chunk of type: %d"%self.type
class InvalidChunk(Exception):
def __init__(self, chunk):
self.chunk = chunk
def __str__(self):
return "Invalid %s"%(self.chunk.__class__.__name__)
class IncorrectChunk(Exception):
def __init__(self, type, expectedtype):
self.type = type
self.expectedtype = expectedtype
def __str__(self):
return "Incorrect chunk type %d (should be %d)"%(self.type, self.expectedtype)
class LastValue(Exception):
pass
def readChunk(psf, file, expectedclasses=None, n=None):
    type = UInt32.fromFile(file)
    file.seek(-4, 1) # Rewind one word since the type will be read again by the deSerializeFile function
    if expectedclasses:
        if not type in [cls.type for cls in expectedclasses]:
            raise ValueError("Unexpected type %d, not in "%type + str([cls.type for cls in expectedclasses]))
        for cls in expectedclasses:
            if type == cls.type:
                chunk = cls(psf)
                break
    # Legacy dispatch table, used only when no expected classes are given
    # (n is forwarded to the chunk types that need an element count)
    elif type == 21:
        chunk = Section(psf)
    elif type == 20:
        chunk = ZeroPad(psf)
    elif type == 22:
        chunk = Container22(psf, type, n=n)
    elif type == 33:
        chunk = PropertyString(psf)
    elif type == 34:
        chunk = PropertyUInt(psf)
    elif type == 35:
        chunk = PropertyFloat64(psf)
    elif type == 16:
        chunk = DataTypeDef(psf, type)
    elif type == 17:
        chunk = GroupDef(psf)
    elif type == 19:
        chunk = HashTable(psf, n=n)
    elif type in (1, 2, 3, 4):
        file.seek(4, 1)
        return None
    else:
        warning("Unknown chunk %d" % type)
        raise UnknownChunk(type)
    chunk.deSerializeFile(file)
    return chunk
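# Sketch of how readChunk supports look-ahead parsing (this mirrors the
# property-peeking loop earlier in this module): try to read one of the
# expected chunk classes and rewind on failure.
#
#   oldpos = file.tell()
#   try:
#       prop = readChunk(psf, file, expectedclasses=PropertyClasses)
#   except ValueError:
#       file.seek(oldpos)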
class PSFReader(object):
def __init__(self, filename=None, asc=None):
self.header = None
self.types = TypeSection(self)
self.sweeps = None
self.traces = None
self.lastid = 0x1000
self.verbose = False
self.filename = filename
self.file = None
self.values = None
self.asc = asc
def open(self):
"""Open a PSF file and read its headers.
Example:
Trying to open a valid psf file
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
"""
        if self.asc is None:
            self.asc = False
if not self.asc:
self.file = open(self.filename, "rb")
if self.validate():
self.deSerializeFile(self.file)
else:
raise PSFInvalid("Invalid PSF file")
else:
newpsfobj = psfasc.parse("psfasc", open(self.filename).read())
self.header = newpsfobj.header
self.types = newpsfobj.types
self.sweeps = newpsfobj.sweeps
self.traces = newpsfobj.traces
self.values = newpsfobj.values
self.lastid = newpsfobj.lastid
self.verbose = newpsfobj.verbose
def validate(self):
"""Check if the PSF file is valid.
Returns True if valid, False otherwise
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.validate()
True
>>> psf=PSFReader('./test/psfasc/srcSweep.asc')
>>> psf.validate()
False
"""
        if self.file is None:
file = open(self.filename, "rb")
else:
file = self.file
# Read Clarissa signature
file.seek(-4-8,2)
clarissa = file.read(8)
return clarissa == "Clarissa"
def getNSweepPoints(self):
"""Returns number of sweeps. 0 if not swept.
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getNSweepPoints()
4
"""
        if self.file is None:
            raise ValueError("Please open the PSF file first")
return self.header.properties['PSF sweep points']
def getNSweeps(self):
"""Returns the number of nested sweeps
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getNSweeps()
1
"""
        if self.file is None:
            raise ValueError("Please open the PSF file first")
return self.header.properties['PSF sweeps']
def __len__(self):
return len(self.values)
def getValueNames(self):
"""Returns a tuple of the names of the traces
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.getValueNames()
>>> psf.open()
>>> psf.getValueNames()
('VOUT', 'VIN', 'R0')
>>> psf=PSFReader('./test/resultdirs/simple/opBegin')
>>> psf.open()
>>> psf.getValueNames()
('R0', 'V1', 'V0', 'E0', 'VIN', 'NET9', 'VOUT')
"""
if self.values:
return self.values.getValueNames()
def getSweepParamNames(self):
return self.sweeps.getNames()
def getSweepParamValues(self, dim=0):
"""Returns a numpy.array of sweep parameter values for sweep dimension dim.
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getSweepParamValues(0)
array([ 1., 2., 3., 4.])
windowed result
>>> psf=PSFReader('./test/psf/timeSweep')
>>> psf.open()
>>> psf.getSweepParamValues(0)[:3]
array([ 0.00000000e+00, 2.00000000e-11, 5.33333333e-11])
"""
return numpy.array(self.values.getSweepParamValues())
def getValuePropertiesByName(self, name):
"""Returns the properties associated with value
>>> psf=PSFReader('./test/psf/opBegin')
>>> psf.open()
>>> psf.getValuePropertiesByName("XIRXRFMIXTRIM0.XM1PDAC1.XMN.MAIN")["Region"]
'subthreshold'
"""
return self.values.getValuePropertiesByName(name)
def getValuesByName(self, name):
"""Returns a numpy.array of trace values for swept results and a scalar for non swept.
Example:
swept psf file
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.getValuesByName("VOUT")
array([-6., -4., -2., 0.])
>>> psf.getValuesByName("VIN")
array([ 1., 2., 3., 4.])
swept psf with complex numbers
>>> psf=PSFReader('./test/psf/frequencySweep')
>>> psf.open()
>>> res = psf.getValuesByName("ANT_CM")
>>> len(res)
123
>>> res[:3]
array([ 0.6+0.j, 0. +0.j, 0. +0.j])
swept windowed psf file
>>> psf=PSFReader('./test/psf/timeSweep')
>>> psf.open()
>>> psf.getValuesByName("INP")[0:3]
array([ 0.6 , 0.62486899, 0.66211478])
non-swept psf file
>>> psf=PSFReader('./test/psf/dcOpInfo.info')
>>> psf.open()
>>> psf.getValuesByName("IREG21U_0.MP5.b1")['betadc']
4.7957014499434756
        swept psf file without groups
>>> psf=PSFReader('./test/resultdirs/parsweep/C=1e-12,R=1e-12/psf/ac.ac')
>>> psf.open()
>>> psf.getValuesByName("net3")
array([ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j])
"""
return self.values.getValueByName(name)
def nTraces(self):
"""Returns number of traces
>>> psf=PSFReader('./test/psf/srcSweep')
>>> psf.open()
>>> psf.nTraces()
3
"""
        if self.file is None:
            raise ValueError("Please open the PSF file first")
return self.header.properties['PSF traces']
def allocId(self):
self.lastid+=1
return self.lastid-1
def info(self):
s="Number of sweeps: %d\n"%self.getNSweeps()
if self.getNSweeps() > 0:
s+="Number of sweep points: %d\n"%self.getNSweepPoints()
s+="Number of traces: %d"%self.nTraces()
return s
def updateHeader(self):
if self.sweeps:
sweeps = len(self.sweeps.children)
else:
sweeps=0
self.header.addProperty(PropertyUInt("PSF sweeps", sweeps))
def deSerializeFile(self, file):
# Find filesize
file.seek(0,2)
filesize = file.tell()
# Last word contains the size of the data
file.seek(-4,2)
datasize = UInt32.fromFile(file).value
if self.verbose:
print "Total data size: ",datasize
# Read Clarissa signature
file.seek(-4-8,2)
clarissa = file.read(8)
        if clarissa != "Clarissa":
raise ValueError("Clarissa signature not found")
# Read section index table
sectionoffsets = {}
file.seek(-4-8-8,2)
pos = file.tell()
sectionnums = []
while file.tell() >= datasize:
sectionnum = UInt32.fromFile(file)
sectionnums.insert(0,sectionnum.value)
offset = UInt32.fromFile(file)
sectionoffsets[sectionnum] = offset
pos -= 8
file.seek(pos)
offsets = [sectionoffsets[secnum] for secnum in sectionnums]
sizes = map(operator.sub, offsets[1:]+[datasize], offsets)
sectionsizes = dict(zip(sectionnums, sizes))
if self.verbose:
print sectionoffsets, sectionsizes
file.seek(0)
self.unk1 = UInt32.fromFile(file)
if self.verbose:
print "First word: 0x%x"%self.unk1
# Load headers
file.seek(int(sectionoffsets[0]))
self.header = HeaderSection(self)
self.header.deSerializeFile(file)
if self.verbose:
print "HEADER"
print self.header
if sectionoffsets.has_key(1):
file.seek(int(sectionoffsets[1]))
self.types.deSerializeFile(file)
if self.verbose:
print "TYPE"
print self.types
if sectionoffsets.has_key(2):
file.seek(int(sectionoffsets[2]))
self.sweeps = SweepSection(self)
self.sweeps.deSerializeFile(file)
if self.verbose:
print "SWEEPS"
print self.sweeps
if sectionoffsets.has_key(3):
file.seek(int(sectionoffsets[3]))
self.traces = TraceSection(self)
self.traces.deSerializeFile(file)
if sectionoffsets.has_key(4):
file.seek(int(sectionoffsets[4]))
# Load data
if self.sweeps:
self.values = ValuesSectionSweep(self)
else:
self.values = ValuesSectionNonSweep(self)
self.values.deSerializeFile(file)
def printme(self):
print "HEADER"
print self.header
print "TYPES"
print self.types
if self.sweeps:
print "SWEEP"
print self.sweeps
if self.traces:
print "TRACE"
print self.traces
print "VALUES"
print self.values
def toPSFasc(self, prec=None):
"""Export to PSF ascii"""
sections = [self.header.toPSFasc(prec), self.types.toPSFasc(prec)]
if self.sweeps:
sections.append(self.sweeps.toPSFasc(prec))
if self.traces:
sections.append(self.traces.toPSFasc(prec))
if self.values:
sections.append(self.values.toPSFasc(prec))
r="\n".join(sections) + "\n"
r+="END\n"
return r
def __repr__(self):
return "\n".join(map(str, (self.header, self.types, self.sweeps, self.traces, self.values)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit |
jemmyw/ansible | v1/ansible/runner/lookup_plugins/inventory_hostnames.py | 173 | 1756 | # (c) 2012, Michael DeHaan <[email protected]>
# (c) 2013, Steven Dossett <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.utils import safe_eval
import ansible.utils as utils
import ansible.errors as errors
import ansible.inventory as inventory
def flatten(terms):
ret = []
for term in terms:
if isinstance(term, list):
ret.extend(term)
else:
ret.append(term)
return ret
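# For example, flatten(['alpha', ['beta', 'gamma']]) returns
# ['alpha', 'beta', 'gamma']; nesting is only spliced one level deep.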
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
if 'runner' in kwargs:
self.host_list = kwargs['runner'].inventory.host_list
else:
raise errors.AnsibleError("inventory_hostnames must be used as a loop. Example: \"with_inventory_hostnames: \'all\'\"")
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if not isinstance(terms, list):
raise errors.AnsibleError("with_inventory_hostnames expects a list")
return flatten(inventory.Inventory(self.host_list).list_hosts(terms))
| gpl-3.0 |
when30/namebench | nb_third_party/dns/rdtypes/IN/NAPTR.py | 248 | 4889 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.name
import dns.rdata
def _write_string(file, s):
l = len(s)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(s)
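# _write_string emits a DNS character-string: a single length octet followed
# by the bytes themselves. For example, _write_string(f, 'sip+E2U') writes
# '\x07sip+E2U' to f (a hypothetical file-like object).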
class NAPTR(dns.rdata.Rdata):
"""NAPTR record
@ivar order: order
@type order: int
@ivar preference: preference
@type preference: int
@ivar flags: flags
@type flags: string
@ivar service: service
@type service: string
@ivar regexp: regular expression
@type regexp: string
@ivar replacement: replacement name
@type replacement: dns.name.Name object
@see: RFC 3403"""
__slots__ = ['order', 'preference', 'flags', 'service', 'regexp',
'replacement']
def __init__(self, rdclass, rdtype, order, preference, flags, service,
regexp, replacement):
super(NAPTR, self).__init__(rdclass, rdtype)
self.order = order
self.preference = preference
self.flags = flags
self.service = service
self.regexp = regexp
self.replacement = replacement
def to_text(self, origin=None, relativize=True, **kw):
replacement = self.replacement.choose_relativity(origin, relativize)
return '%d %d "%s" "%s" "%s" %s' % \
(self.order, self.preference,
dns.rdata._escapify(self.flags),
dns.rdata._escapify(self.service),
dns.rdata._escapify(self.regexp),
self.replacement)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
order = tok.get_uint16()
preference = tok.get_uint16()
flags = tok.get_string()
service = tok.get_string()
regexp = tok.get_string()
replacement = tok.get_name()
replacement = replacement.choose_relativity(origin, relativize)
tok.get_eol()
return cls(rdclass, rdtype, order, preference, flags, service,
regexp, replacement)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
two_ints = struct.pack("!HH", self.order, self.preference)
file.write(two_ints)
_write_string(file, self.flags)
_write_string(file, self.service)
_write_string(file, self.regexp)
self.replacement.to_wire(file, compress, origin)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(order, preference) = struct.unpack('!HH', wire[current : current + 4])
current += 4
rdlen -= 4
strings = []
for i in xrange(3):
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen or rdlen < 0:
raise dns.exception.FormError
s = wire[current : current + l]
current += l
rdlen -= l
strings.append(s)
(replacement, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
if cused != rdlen:
raise dns.exception.FormError
if not origin is None:
replacement = replacement.relativize(origin)
return cls(rdclass, rdtype, order, preference, strings[0], strings[1],
strings[2], replacement)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.replacement = self.replacement.choose_relativity(origin,
relativize)
def _cmp(self, other):
sp = struct.pack("!HH", self.order, self.preference)
op = struct.pack("!HH", other.order, other.preference)
v = cmp(sp, op)
if v == 0:
v = cmp(self.flags, other.flags)
if v == 0:
v = cmp(self.service, other.service)
if v == 0:
v = cmp(self.regexp, other.regexp)
if v == 0:
v = cmp(self.replacement, other.replacement)
return v
| apache-2.0 |
rebstar6/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_endtoend.py | 449 | 26811 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""End-to-end tests for pywebsocket. Tests standalone.py by default. You
can also test mod_pywebsocket hosted on an Apache server by setting
_use_external_server to True and modifying _external_server_port to point to
the port on which the Apache server is running.
"""
import logging
import os
import signal
import socket
import subprocess
import sys
import time
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from test import client_for_testing
from test import mux_client_for_testing
# Special message that tells the echo server to start closing handshake
_GOODBYE_MESSAGE = 'Goodbye'
_SERVER_WARMUP_IN_SEC = 0.2
# If you want to use external server to run end to end tests, set following
# parameters correctly.
_use_external_server = False
_external_server_port = 0
# Test body functions
def _echo_check_procedure(client):
client.connect()
client.send_message('test')
client.assert_receive('test')
client.send_message('helloworld')
client.assert_receive('helloworld')
client.send_close()
client.assert_receive_close()
client.assert_connection_closed()
def _echo_check_procedure_with_binary(client):
client.connect()
client.send_message('binary', binary=True)
client.assert_receive('binary', binary=True)
client.send_message('\x00\x80\xfe\xff\x00\x80', binary=True)
client.assert_receive('\x00\x80\xfe\xff\x00\x80', binary=True)
client.send_close()
client.assert_receive_close()
client.assert_connection_closed()
def _echo_check_procedure_with_goodbye(client):
client.connect()
client.send_message('test')
client.assert_receive('test')
client.send_message(_GOODBYE_MESSAGE)
client.assert_receive(_GOODBYE_MESSAGE)
client.assert_receive_close()
client.send_close()
client.assert_connection_closed()
def _echo_check_procedure_with_code_and_reason(client, code, reason):
client.connect()
client.send_close(code, reason)
client.assert_receive_close(code, reason)
client.assert_connection_closed()
def _unmasked_frame_check_procedure(client):
client.connect()
client.send_message('test', mask=False)
client.assert_receive_close(client_for_testing.STATUS_PROTOCOL_ERROR, '')
client.assert_connection_closed()
def _mux_echo_check_procedure(mux_client):
mux_client.connect()
mux_client.send_flow_control(1, 1024)
logical_channel_options = client_for_testing.ClientOptions()
logical_channel_options.server_host = 'localhost'
logical_channel_options.server_port = 80
logical_channel_options.origin = 'http://localhost'
logical_channel_options.resource = '/echo'
mux_client.add_channel(2, logical_channel_options)
mux_client.send_flow_control(2, 1024)
mux_client.send_message(2, 'test')
mux_client.assert_receive(2, 'test')
mux_client.add_channel(3, logical_channel_options)
mux_client.send_flow_control(3, 1024)
mux_client.send_message(2, 'hello')
mux_client.send_message(3, 'world')
mux_client.assert_receive(2, 'hello')
mux_client.assert_receive(3, 'world')
# Don't send close message on channel id 1 so that server-initiated
# closing handshake won't occur.
mux_client.send_close(2)
mux_client.send_close(3)
mux_client.assert_receive_close(2)
mux_client.assert_receive_close(3)
mux_client.send_physical_connection_close()
mux_client.assert_physical_connection_receive_close()
class EndToEndTestBase(unittest.TestCase):
"""Base class for end-to-end tests that launch pywebsocket standalone
server as a separate process, connect to it using the client_for_testing
module, and check if the server behaves correctly by exchanging opening
handshake and frames over a TCP connection.
"""
def setUp(self):
self.server_stderr = None
self.top_dir = os.path.join(os.path.split(__file__)[0], '..')
os.putenv('PYTHONPATH', os.path.pathsep.join(sys.path))
self.standalone_command = os.path.join(
self.top_dir, 'mod_pywebsocket', 'standalone.py')
self.document_root = os.path.join(self.top_dir, 'example')
s = socket.socket()
s.bind(('localhost', 0))
(_, self.test_port) = s.getsockname()
s.close()
self._options = client_for_testing.ClientOptions()
self._options.server_host = 'localhost'
self._options.origin = 'http://localhost'
self._options.resource = '/echo'
# TODO(toyoshim): Eliminate launching a standalone server on using
# external server.
if _use_external_server:
self._options.server_port = _external_server_port
else:
self._options.server_port = self.test_port
# TODO(tyoshino): Use tearDown to kill the server.
def _run_python_command(self, commandline, stdout=None, stderr=None):
return subprocess.Popen([sys.executable] + commandline, close_fds=True,
stdout=stdout, stderr=stderr)
def _run_server(self):
args = [self.standalone_command,
'-H', 'localhost',
'-V', 'localhost',
'-p', str(self.test_port),
'-P', str(self.test_port),
'-d', self.document_root]
# Inherit the level set to the root logger by test runner.
root_logger = logging.getLogger()
log_level = root_logger.getEffectiveLevel()
if log_level != logging.NOTSET:
args.append('--log-level')
args.append(logging.getLevelName(log_level).lower())
return self._run_python_command(args,
stderr=self.server_stderr)
def _kill_process(self, pid):
if sys.platform in ('win32', 'cygwin'):
subprocess.call(
('taskkill.exe', '/f', '/pid', str(pid)), close_fds=True)
else:
os.kill(pid, signal.SIGKILL)
class EndToEndHyBiTest(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _run_test_with_client_options(self, test_function, options):
server = self._run_server()
try:
# TODO(tyoshino): add some logic to poll the server until it
# becomes ready
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_test(self, test_function):
self._run_test_with_client_options(test_function, self._options)
def _run_deflate_frame_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
self._options.enable_deflate_frame()
client = client_for_testing.create_client(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_permessage_deflate_test(
self, offer, response_checker, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
self._options.extensions += offer
self._options.check_permessage_deflate = response_checker
client = client_for_testing.create_client(self._options)
try:
client.connect()
if test_function is not None:
test_function(client)
client.assert_connection_closed()
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_close_with_code_and_reason_test(self, test_function, code,
reason):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(self._options)
try:
test_function(client, code, reason)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_http_fallback_test(self, options, status):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(options)
try:
client.connect()
self.fail('Could not catch HttpStatusException')
except client_for_testing.HttpStatusException, e:
self.assertEqual(status, e.status)
            except Exception, e:
                self.fail('Caught unexpected exception: %r' % e)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_mux_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = mux_client_for_testing.MuxClient(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def test_echo(self):
self._run_test(_echo_check_procedure)
def test_echo_binary(self):
self._run_test(_echo_check_procedure_with_binary)
def test_echo_server_close(self):
self._run_test(_echo_check_procedure_with_goodbye)
def test_unmasked_frame(self):
self._run_test(_unmasked_frame_check_procedure)
def test_echo_deflate_frame(self):
self._run_deflate_frame_test(_echo_check_procedure)
def test_echo_deflate_frame_server_close(self):
self._run_deflate_frame_test(
_echo_check_procedure_with_goodbye)
def test_echo_permessage_deflate(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
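            # (This byte sequence is the raw-DEFLATE encoding of 'Hello',
            # taken from the permessage-deflate specification's examples.)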
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_frames(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd',
client_for_testing.OPCODE_TEXT,
end=False,
rsv1=1)
client._stream.send_data(
'\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_messages(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.send_data(
'\xf2\x00\x11\x00\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x00\x11\x00\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_msgs_server_no_context_takeover(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.send_data(
'\xf2\x00\x11\x00\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([('server_no_context_takeover', None)],
parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate; server_no_context_takeover'],
response_checker,
test_function)
def test_echo_permessage_deflate_preference(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate', 'deflate-frame'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_parameters(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([('server_max_window_bits', '10'),
('server_no_context_takeover', None)],
parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate; server_max_window_bits=10; '
'server_no_context_takeover'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_bad_server_max_window_bits(self):
def test_function(client):
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
raise Exception('Unexpected acceptance of permessage-deflate')
self._run_permessage_deflate_test(
['permessage-deflate; server_max_window_bits=3000000'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_undefined_parameter(self):
def test_function(client):
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
raise Exception('Unexpected acceptance of permessage-deflate')
self._run_permessage_deflate_test(
['permessage-deflate; foo=bar'],
response_checker,
test_function)
def test_echo_close_with_code_and_reason(self):
self._options.resource = '/close'
self._run_close_with_code_and_reason_test(
_echo_check_procedure_with_code_and_reason, 3333, 'sunsunsunsun')
def test_echo_close_with_empty_body(self):
self._options.resource = '/close'
self._run_close_with_code_and_reason_test(
_echo_check_procedure_with_code_and_reason, None, '')
def test_mux_echo(self):
self._run_mux_test(_mux_echo_check_procedure)
def test_close_on_protocol_error(self):
"""Tests that the server sends a close frame with protocol error status
code when the client sends data with some protocol error.
"""
def test_function(client):
client.connect()
# Intermediate frame without any preceding start of fragmentation
# frame.
client.send_frame_of_arbitrary_bytes('\x80\x80', '')
client.assert_receive_close(
client_for_testing.STATUS_PROTOCOL_ERROR)
self._run_test(test_function)
def test_close_on_unsupported_frame(self):
"""Tests that the server sends a close frame with unsupported operation
status code when the client sends data asking some operation that is
not supported by the server.
"""
def test_function(client):
client.connect()
# Text frame with RSV3 bit raised.
client.send_frame_of_arbitrary_bytes('\x91\x80', '')
client.assert_receive_close(
client_for_testing.STATUS_UNSUPPORTED_DATA)
self._run_test(test_function)
def test_close_on_invalid_frame(self):
"""Tests that the server sends a close frame with invalid frame payload
data status code when the client sends an invalid frame like containing
invalid UTF-8 character.
"""
def test_function(client):
client.connect()
# Text frame with invalid UTF-8 string.
client.send_message('\x80', raw=True)
client.assert_receive_close(
client_for_testing.STATUS_INVALID_FRAME_PAYLOAD_DATA)
self._run_test(test_function)
def test_close_on_internal_endpoint_error(self):
"""Tests that the server sends a close frame with internal endpoint
error status code when the handler does bad operation.
"""
self._options.resource = '/internal_error'
def test_function(client):
client.connect()
client.assert_receive_close(
client_for_testing.STATUS_INTERNAL_ENDPOINT_ERROR)
self._run_test(test_function)
# TODO(toyoshim): Add tests to verify invalid absolute uri handling like
# host unmatch, port unmatch and invalid port description (':' without port
# number).
def test_absolute_uri(self):
"""Tests absolute uri request."""
options = self._options
options.resource = 'ws://localhost:%d/echo' % options.server_port
self._run_test_with_client_options(_echo_check_procedure, options)
def test_origin_check(self):
"""Tests http fallback on origin check fail."""
options = self._options
options.resource = '/origin_check'
# Server shows warning message for http 403 fallback. This warning
# message is confusing. Following pipe disposes warning messages.
self.server_stderr = subprocess.PIPE
self._run_http_fallback_test(options, 403)
def test_version_check(self):
"""Tests http fallback on version check fail."""
options = self._options
options.version = 99
self._run_http_fallback_test(options, 400)
class EndToEndHyBi00Test(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _run_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client_hybi00(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def test_echo(self):
self._run_test(_echo_check_procedure)
def test_echo_server_close(self):
self._run_test(_echo_check_procedure_with_goodbye)
class EndToEndTestWithEchoClient(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _check_example_echo_client_result(
self, expected, stdoutdata, stderrdata):
actual = stdoutdata.decode("utf-8")
if actual != expected:
raise Exception('Unexpected result on example echo client: '
'%r (expected) vs %r (actual)' %
(expected, actual))
if stderrdata is not None:
raise Exception('Unexpected error message on example echo '
'client: %r' % stderrdata)
def test_example_echo_client(self):
"""Tests that the echo_client.py example can talk with the server."""
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client_command = os.path.join(
self.top_dir, 'example', 'echo_client.py')
# Expected output for the default messages.
default_expectation = ('Send: Hello\n' 'Recv: Hello\n'
u'Send: \u65e5\u672c\n' u'Recv: \u65e5\u672c\n'
'Send close\n' 'Recv ack\n')
args = [client_command,
'-p', str(self._options.server_port)]
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
self._check_example_echo_client_result(
default_expectation, stdoutdata, stderrdata)
# Process a big message for which extended payload length is used.
# To handle extended payload length, ws_version attribute will be
# accessed. This test checks that ws_version is correctly set.
big_message = 'a' * 1024
args = [client_command,
'-p', str(self._options.server_port),
'-m', big_message]
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
expected = ('Send: %s\nRecv: %s\nSend close\nRecv ack\n' %
(big_message, big_message))
self._check_example_echo_client_result(
expected, stdoutdata, stderrdata)
# Test the permessage-deflate extension.
args = [client_command,
'-p', str(self._options.server_port),
'--use_permessage_deflate']
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
self._check_example_echo_client_result(
default_expectation, stdoutdata, stderrdata)
finally:
self._kill_process(server.pid)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
jlegendary/orange | Orange/OrangeWidgets/Classify/OWKNN.py | 6 | 6326 | """
<name>k Nearest Neighbours</name>
<description>K-nearest neighbours learner/classifier.</description>
<icon>icons/kNearestNeighbours.svg</icon>
<contact>Janez Demsar (janez.demsar(@at@)fri.uni-lj.si)</contact>
<priority>25</priority>
"""
from OWWidget import *
import OWGUI
from exceptions import Exception
from orngWrap import PreprocessedLearner
NAME = "k Nearest Neighbours"
ID = "orange.widgets.classify.knn"
DESCRIPTION = "K-nearest neighbours learner/classifier."
ICON = "icons/kNearestNeighbours.svg"
AUTHOR = "Janez Demsar"
PRIORITY = 25
HELP_REF = "k-Nearest Neighbours"
KEYWORDS = ["knn"]
INPUTS = (
InputSignal(name="Data",
type=ExampleTable,
handler="setData",
doc="Training data set",
id="train-data"),
InputSignal(name="Preprocess",
type=PreprocessedLearner,
handler="setPreprocessor",
id="preprocessor")
)
OUTPUTS = (
OutputSignal(name="Learner",
type=orange.Learner,
doc="The kNN learner with settings as specified in "
"the dialog",
id="learner"),
OutputSignal(name="kNN Classifier",
type=orange.kNNClassifier,
doc="A kNN classifier trained on 'Data'.",
id="knn-classifier")
)
WIDGET_CLASS = "OWKNN"
class OWKNN(OWWidget):
settingsList = ["name", "k", "metrics", "ranks", "normalize", "ignoreUnknowns"]
def __init__(self, parent=None, signalManager = None, name='kNN'):
OWWidget.__init__(self, parent, signalManager, name, wantMainArea = 0, resizingEnabled = 0)
self.callbackDeposit = []
self.inputs = [("Data", ExampleTable, self.setData), ("Preprocess", PreprocessedLearner, self.setPreprocessor)]
self.outputs = [("Learner", orange.Learner),("kNN Classifier", orange.kNNClassifier)]
self.metricsList = [("Euclidean", orange.ExamplesDistanceConstructor_Euclidean),
("Hamming", orange.ExamplesDistanceConstructor_Hamming),
("Manhattan", orange.ExamplesDistanceConstructor_Manhattan),
("Maximal", orange.ExamplesDistanceConstructor_Maximal),
# ("Dynamic time warp", orange.ExamplesDistanceConstructor_DTW)
]
# Settings
self.name = 'kNN'
        self.k = 5
        self.metrics = 0
        self.ranks = 0
self.ignoreUnknowns = 0
self.normalize = self.oldNormalize = 1
self.loadSettings()
self.data = None # input data set
self.preprocessor = None # no preprocessing as default
self.setLearner() # this just sets the learner, no data
# has come to the input yet
OWGUI.lineEdit(self.controlArea, self, 'name', box='Learner/Classifier Name', \
tooltip='Name to be used by other widgets to identify your learner/classifier.')
OWGUI.separator(self.controlArea)
wbN = OWGUI.widgetBox(self.controlArea, "Neighbours")
OWGUI.spin(wbN, self, "k", 1, 100, 1, None, "Number of neighbours ", orientation="horizontal")
OWGUI.checkBox(wbN, self, "ranks", "Weighting by ranks, not distances")
OWGUI.separator(self.controlArea)
wbM = OWGUI.widgetBox(self.controlArea, "Metrics")
OWGUI.comboBox(wbM, self, "metrics", items = [x[0] for x in self.metricsList], valueType = int, callback = self.metricsChanged)
self.cbNormalize = OWGUI.checkBox(wbM, self, "normalize", "Normalize continuous attributes")
OWGUI.checkBox(wbM, self, "ignoreUnknowns", "Ignore unknown values")
self.metricsChanged()
OWGUI.separator(self.controlArea)
OWGUI.button(self.controlArea, self, "&Apply", callback=self.setLearner, disabled=0, default=True)
OWGUI.rubber(self.controlArea)
self.resize(100,250)
def sendReport(self):
self.reportSettings("Learning parameters",
[("Metrics", self.metricsList[self.metrics][0]),
not self.metrics and ("Continuous attributes", ["Raw", "Normalized"][self.normalize]),
("Unknown values ignored", OWGUI.YesNo[self.ignoreUnknowns]),
("Number of neighbours", self.k),
("Weighting", ["By distances", "By ranked distances"][self.ranks])])
self.reportData(self.data)
def metricsChanged(self):
if not self.metrics and not self.cbNormalize.isEnabled():
self.normalize = self.oldNormalize
self.cbNormalize.setEnabled(True)
elif self.metrics and self.cbNormalize.isEnabled():
self.oldNormalize = self.normalize
self.normalize = False
self.cbNormalize.setEnabled(False)
def setData(self,data):
self.data = self.isDataWithClass(data, orange.VarTypes.Discrete, checkMissing=True) and data or None
self.setLearner()
def setPreprocessor(self, pp):
self.preprocessor = pp
self.setLearner()
def setLearner(self):
distconst = self.metricsList[self.metrics][1]()
distconst.ignoreUnknowns = self.ignoreUnknowns
distconst.normalize = self.normalize
self.learner = orange.kNNLearner(k = self.k, rankWeight = self.ranks, distanceConstructor = distconst)
if self.preprocessor:
self.learner = self.preprocessor.wrapLearner(self.learner)
self.learner.name = self.name
self.send("Learner", self.learner)
self.learn()
def learn(self):
self.classifier = None
if self.data and self.learner:
try:
self.classifier = self.learner(self.data)
self.classifier.name = self.name
except Exception, (errValue):
self.classifier = None
self.error(str(errValue))
self.send("kNN Classifier", self.classifier)
if __name__ == "__main__":
a = QApplication(sys.argv)
ow = OWKNN()
dataset = orange.ExampleTable('adult_sample')
ow.setData(dataset)
ow.show()
a.exec_()
ow.saveSettings()
| gpl-3.0 |
TresysTechnology/setools | tests/nodeconquery.py | 1 | 10617 | # Copyright 2014, Tresys Technology, LLC
# Copyright 2017, Chris PeBenito <[email protected]>
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SETools. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import unittest
from socket import AF_INET6
from ipaddress import IPv4Network, IPv6Network
from setools import SELinuxPolicy, NodeconQuery
class NodeconQueryTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.p = SELinuxPolicy("tests/nodeconquery.conf")
def test_000_unset(self):
"""Nodecon query with no criteria"""
# query with no parameters gets all nodecons.
nodecons = sorted(self.p.nodecons())
q = NodeconQuery(self.p)
q_nodecons = sorted(q.results())
self.assertListEqual(nodecons, q_nodecons)
def test_001_ip_version(self):
"""Nodecon query with IP version match."""
q = NodeconQuery(self.p, ip_version=AF_INET6)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv6Network("1100::/16"), IPv6Network("1110::/16")], nodecons)
def test_020_user_exact(self):
"""Nodecon query with context user exact match"""
q = NodeconQuery(self.p, user="user20", user_regex=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.20.1/32")], nodecons)
def test_021_user_regex(self):
"""Nodecon query with context user regex match"""
q = NodeconQuery(self.p, user="user21(a|b)", user_regex=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.21.1/32"), IPv4Network("10.1.21.2/32")], nodecons)
def test_030_role_exact(self):
"""Nodecon query with context role exact match"""
q = NodeconQuery(self.p, role="role30_r", role_regex=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.30.1/32")], nodecons)
def test_031_role_regex(self):
"""Nodecon query with context role regex match"""
q = NodeconQuery(self.p, role="role31(a|c)_r", role_regex=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.31.1/32"), IPv4Network("10.1.31.3/32")], nodecons)
def test_040_type_exact(self):
"""Nodecon query with context type exact match"""
q = NodeconQuery(self.p, type_="type40", type_regex=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.40.1/32")], nodecons)
def test_041_type_regex(self):
"""Nodecon query with context type regex match"""
q = NodeconQuery(self.p, type_="type41(b|c)", type_regex=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.41.2/32"), IPv4Network("10.1.41.3/32")], nodecons)
def test_050_range_exact(self):
"""Nodecon query with context range exact match"""
q = NodeconQuery(self.p, range_="s0:c1 - s0:c0.c4")
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.50.1/32")], nodecons)
def test_051_range_overlap1(self):
"""Nodecon query with context range overlap match (equal)"""
q = NodeconQuery(self.p, range_="s1:c1 - s1:c0.c4", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap2(self):
"""Nodecon query with context range overlap match (subset)"""
q = NodeconQuery(self.p, range_="s1:c1,c2 - s1:c0.c3", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap3(self):
"""Nodecon query with context range overlap match (superset)"""
q = NodeconQuery(self.p, range_="s1 - s1:c0.c4", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap4(self):
"""Nodecon query with context range overlap match (overlap low level)"""
q = NodeconQuery(self.p, range_="s1 - s1:c1,c2", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_051_range_overlap5(self):
"""Nodecon query with context range overlap match (overlap high level)"""
q = NodeconQuery(self.p, range_="s1:c1,c2 - s1:c0.c4", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.51.1/32")], nodecons)
def test_052_range_subset1(self):
"""Nodecon query with context range subset match"""
q = NodeconQuery(self.p, range_="s2:c1,c2 - s2:c0.c3", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.52.1/32")], nodecons)
def test_052_range_subset2(self):
"""Nodecon query with context range subset match (equal)"""
q = NodeconQuery(self.p, range_="s2:c1 - s2:c1.c3", range_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.52.1/32")], nodecons)
def test_053_range_superset1(self):
"""Nodecon query with context range superset match"""
q = NodeconQuery(self.p, range_="s3 - s3:c0.c4", range_superset=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.53.1/32")], nodecons)
def test_053_range_superset2(self):
"""Nodecon query with context range superset match (equal)"""
q = NodeconQuery(self.p, range_="s3:c1 - s3:c1.c3", range_superset=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.53.1/32")], nodecons)
def test_054_range_proper_subset1(self):
"""Nodecon query with context range proper subset match"""
q = NodeconQuery(self.p, range_="s4:c1,c2", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons)
def test_054_range_proper_subset2(self):
"""Nodecon query with context range proper subset match (equal)"""
q = NodeconQuery(self.p, range_="s4:c1 - s4:c1.c3", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([], nodecons)
def test_054_range_proper_subset3(self):
"""Nodecon query with context range proper subset match (equal low only)"""
q = NodeconQuery(self.p, range_="s4:c1 - s4:c1.c2", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons)
def test_054_range_proper_subset4(self):
"""Nodecon query with context range proper subset match (equal high only)"""
q = NodeconQuery(self.p, range_="s4:c1,c2 - s4:c1.c3", range_subset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.54.1/32")], nodecons)
def test_055_range_proper_superset1(self):
"""Nodecon query with context range proper superset match"""
q = NodeconQuery(self.p, range_="s5 - s5:c0.c4", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons)
def test_055_range_proper_superset2(self):
"""Nodecon query with context range proper superset match (equal)"""
q = NodeconQuery(self.p, range_="s5:c1 - s5:c1.c3", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([], nodecons)
def test_055_range_proper_superset3(self):
"""Nodecon query with context range proper superset match (equal low)"""
q = NodeconQuery(self.p, range_="s5:c1 - s5:c1.c4", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons)
def test_055_range_proper_superset4(self):
"""Nodecon query with context range proper superset match (equal high)"""
q = NodeconQuery(self.p, range_="s5 - s5:c1.c3", range_superset=True, range_proper=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("10.1.55.1/32")], nodecons)
def test_100_v4network_equal(self):
"""Nodecon query with IPv4 equal network"""
q = NodeconQuery(self.p, network="192.168.1.0/24", network_overlap=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("192.168.1.0/24")], nodecons)
def test_101_v4network_overlap(self):
"""Nodecon query with IPv4 network overlap"""
q = NodeconQuery(self.p, network="192.168.201.0/24", network_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv4Network("192.168.200.0/22")], nodecons)
def test_110_v6network_equal(self):
"""Nodecon query with IPv6 equal network"""
q = NodeconQuery(self.p, network="1100::/16", network_overlap=False)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv6Network("1100::/16")], nodecons)
def test_111_v6network_overlap(self):
"""Nodecon query with IPv6 network overlap"""
q = NodeconQuery(self.p, network="1110:8000::/17", network_overlap=True)
nodecons = sorted(n.network for n in q.results())
self.assertListEqual([IPv6Network("1110::/16")], nodecons)
| lgpl-2.1 |
lcf258/openwrtcnwanzhen | target/linux/x86/image/mkimg_bifferboard.py | 561 | 1265 | #!/usr/bin/env python
"""
Create firmware for 4/8MB Bifferboards, suitable for uploading using
either bb_upload8.py or bb_eth_upload8.py
"""
import struct, sys
# Increase the kmax value if the script gives errors about the kernel being
# too large. You need to set the Biffboot kmax value to the same value you
# use here.
kmax = 0x10
# No need to change this for 4MB devices, it's only used to tell you if
# the firmware is too large!
flash_size = 0x800000
# This is always the same, for 1MB, 4MB and 8MB devices
config_extent = 0x6000
kernel_extent = kmax * 0x10000
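# Worked out for the defaults above (an assumption, not extra configuration):
# the kernel may occupy at most kernel_extent - config_extent
# = 0x100000 - 0x6000 = 0xfa000 bytes, and the rootfs at most
# flash_size - 0x10000 - config_extent = 0x7ea000 bytes on an 8MB part
# (the last 0x10000 bytes are reserved, presumably for the bootloader).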
if __name__ == "__main__":
if len(sys.argv) != 4:
print "usage: mkimg_bifferboard.py <kernel> <rootfs> <output file>"
sys.exit(-1)
bzimage = sys.argv[1]
rootfs = sys.argv[2]
target = sys.argv[3]
# Kernel first
fw = file(bzimage).read()
if len(fw) > (kernel_extent - config_extent):
raise IOError("Kernel too large")
# Pad up to end of kernel partition
while len(fw) < (kernel_extent - config_extent):
fw += "\xff"
fw += file(rootfs).read()
# Check length of total
if len(fw) > (flash_size - 0x10000 - config_extent):
raise IOError("Rootfs too large")
file(target,"wb").write(fw)
print "Firmware written to '%s'" % target
| gpl-2.0 |
jeremiahmarks/sl4a | python/src/Lib/encodings/iso8859_3.py | 593 | 13345 | """ Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-3',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE
u'\u02d8' # 0xA2 -> BREVE
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\ufffe' # 0xA5 -> UNDEFINED
u'\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX
u'\xad' # 0xAD -> SOFT HYPHEN
u'\ufffe' # 0xAE -> UNDEFINED
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I
u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
u'\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE
u'\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\ufffe' # 0xBE -> UNDEFINED
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\ufffe' # 0xC3 -> UNDEFINED
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
u'\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\ufffe' # 0xD0 -> UNDEFINED
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE
u'\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\ufffe' # 0xE3 -> UNDEFINED
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE
u'\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\ufffe' # 0xF0 -> UNDEFINED
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE
u'\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
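# Round-trip sketch; this module ships in the stdlib's encodings package, so
# the codec name 'iso8859-3' used below is already registered:
#   u'\u0126'.encode('iso8859-3')  # -> '\xa1' (H WITH STROKE, per the table)
#   '\xa1'.decode('iso8859-3')     # -> u'\u0126'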
| apache-2.0 |
coins4lunch/EvilCoin | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0  # passed to ImageMagick's -delay, i.e. ticks of 1/100 s between frames
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
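# Sanity-check sketch (assumes ImageMagick's identify tool is on PATH and
# built with MNG support):
#   identify ../../src/qt/res/movies/update_spinner.mng  # should list 35 frames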
| mit |
zq317157782/Narukami | external/googletest/googlemock/scripts/generator/cpp/ast.py | 16 | 62772 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = '[email protected] (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
# Support Python 2.3 and earlier.
def reversed(seq):
for i in range(len(seq)-1, -1, -1):
yield seq[i]
if not hasattr(builtins, 'next'):
# Support Python 2.5 and earlier.
def next(obj):
return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
FUNCTION_OVERRIDE = 0x100
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
__contains__ = lambda self, key: False  # membership test takes the probed key
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
"""Base AST node."""
def __init__(self, start, end):
self.start = start
self.end = end
def IsDeclaration(self):
"""Returns bool if this node is a declaration."""
return False
def IsDefinition(self):
"""Returns bool if this node is a definition."""
return False
def IsExportable(self):
"""Returns bool if this node exportable from a header file."""
return False
def Requires(self, node):
"""Does this AST node require the definition of the node passed in?"""
return False
def XXX__str__(self):
return self._StringHelper(self.__class__.__name__, '')
def _StringHelper(self, name, suffix):
if not utils.DEBUG:
return '%s(%s)' % (name, suffix)
return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
def __repr__(self):
return str(self)
class Define(Node):
def __init__(self, start, end, name, definition):
Node.__init__(self, start, end)
self.name = name
self.definition = definition
def __str__(self):
value = '%s %s' % (self.name, self.definition)
return self._StringHelper(self.__class__.__name__, value)
class Include(Node):
def __init__(self, start, end, filename, system):
Node.__init__(self, start, end)
self.filename = filename
self.system = system
def __str__(self):
fmt = '"%s"'
if self.system:
fmt = '<%s>'
return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
def __init__(self, start, end, label):
Node.__init__(self, start, end)
self.label = label
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
def __init__(self, start, end, expr):
Node.__init__(self, start, end)
self.expr = expr
def Requires(self, node):
# TODO(nnorwitz): impl.
return False
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
pass
class Delete(Expr):
pass
class Friend(Expr):
def __init__(self, start, end, expr, namespace):
Expr.__init__(self, start, end, expr)
self.namespace = namespace[:]
class Using(Node):
def __init__(self, start, end, names):
Node.__init__(self, start, end)
self.names = names
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
def __init__(self, start, end, name, parameter_type, default):
Node.__init__(self, start, end)
self.name = name
self.type = parameter_type
self.default = default
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def __str__(self):
name = str(self.type)
suffix = '%s %s' % (name, self.name)
if self.default:
suffix += ' = ' + ''.join([d.name for d in self.default])
return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
def __init__(self, start, end, name, namespace):
Node.__init__(self, start, end)
self.name = name
self.namespace = namespace[:]
def FullName(self):
prefix = ''
if self.namespace and self.namespace[-1]:
prefix = '::'.join(self.namespace) + '::'
return prefix + self.name
def _TypeStringHelper(self, suffix):
if self.namespace:
names = [n or '<anonymous>' for n in self.namespace]
suffix += ' in ' + '::'.join(names)
return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
def __init__(self, start, end, name, var_type, initial_value, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.type = var_type
self.initial_value = initial_value
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def ToString(self):
"""Return a string that tries to reconstitute the variable decl."""
suffix = '%s %s' % (self.type, self.name)
if self.initial_value:
suffix += ' = ' + self.initial_value
return suffix
def __str__(self):
return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
def __init__(self, start, end, name, alias, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.alias = alias
def IsDefinition(self):
return True
def IsExportable(self):
return True
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
name = node.name
for token in self.alias:
if token is not None and name == token.name:
return True
return False
def __str__(self):
suffix = '%s, %s' % (self.name, self.alias)
return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
def __init__(self, start, end, name, fields, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.fields = fields
def IsDefinition(self):
return True
def IsExportable(self):
return True
def __str__(self):
suffix = '%s, {%s}' % (self.name, self.fields)
return self._TypeStringHelper(suffix)
class Union(_NestedType):
pass
class Enum(_NestedType):
pass
class Class(_GenericDeclaration):
def __init__(self, start, end, name, bases, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.bases = bases
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.bases is None and self.body is None
def IsDefinition(self):
return not self.IsDeclaration()
def IsExportable(self):
return not self.IsDeclaration()
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
if self.bases:
for token_list in self.bases:
# TODO(nnorwitz): bases are tokens, do name comparison.
for token in token_list:
if token.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
name = self.name
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = '%s, %s, %s' % (name, self.bases, self.body)
return self._TypeStringHelper(suffix)
class Struct(Class):
pass
class Function(_GenericDeclaration):
def __init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
converter = TypeConverter(namespace)
self.return_type = converter.CreateReturnType(return_type)
self.parameters = converter.ToParameters(parameters)
self.modifiers = modifiers
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.body is None
def IsDefinition(self):
return self.body is not None
def IsExportable(self):
if self.return_type and 'static' in self.return_type.modifiers:
return False
return None not in self.namespace
def Requires(self, node):
if self.parameters:
# TODO(nnorwitz): parameters are tokens, do name comparison.
for p in self.parameters:
if p.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
# TODO(nnorwitz): add templated_types.
suffix = ('%s %s(%s), 0x%02x, %s' %
(self.return_type, self.name, self.parameters,
self.modifiers, self.body))
return self._TypeStringHelper(suffix)
class Method(Function):
def __init__(self, start, end, name, in_class, return_type, parameters,
modifiers, templated_types, body, namespace):
Function.__init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace)
# TODO(nnorwitz): in_class could also be a namespace which can
# mess up finding functions properly.
self.in_class = in_class
class Type(_GenericDeclaration):
"""Type used for any variable (eg class, primitive, struct, etc)."""
def __init__(self, start, end, name, templated_types, modifiers,
reference, pointer, array):
"""
Args:
name: str name of main type
templated_types: [Class (Type?)] template type info between <>
modifiers: [str] type modifiers (keywords) e.g. const, mutable, etc.
reference, pointer, array: bools
"""
_GenericDeclaration.__init__(self, start, end, name, [])
self.templated_types = templated_types
if not name and modifiers:
self.name = modifiers.pop()
self.modifiers = modifiers
self.reference = reference
self.pointer = pointer
self.array = array
def __str__(self):
prefix = ''
if self.modifiers:
prefix = ' '.join(self.modifiers) + ' '
name = str(self.name)
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = prefix + name
if self.reference:
suffix += '&'
if self.pointer:
suffix += '*'
if self.array:
suffix += '[]'
return self._TypeStringHelper(suffix)
# By definition, Is* are always False. A Type can only exist in
# some sort of variable declaration, parameter, or return value.
def IsDeclaration(self):
return False
def IsDefinition(self):
return False
def IsExportable(self):
return False
class TypeConverter(object):
def __init__(self, namespace_stack):
self.namespace_stack = namespace_stack
def _GetTemplateEnd(self, tokens, start):
count = 1
end = start
while 1:
token = tokens[end]
end += 1
if token.name == '<':
count += 1
elif token.name == '>':
count -= 1
if count == 0:
break
return tokens[start:end-1], end
def ToType(self, tokens):
"""Convert [Token,...] to [Class(...), ] useful for base classes.
For example, code like class Foo : public Bar<x, y> { ... };
the "Bar<x, y>" portion gets converted to an AST.
Returns:
[Class(...), ...]
"""
result = []
name_tokens = []
reference = pointer = array = False
def AddType(templated_types):
# Partition tokens into name and modifier tokens.
names = []
modifiers = []
for t in name_tokens:
if keywords.IsKeyword(t.name):
modifiers.append(t.name)
else:
names.append(t.name)
name = ''.join(names)
if name_tokens:
result.append(Type(name_tokens[0].start, name_tokens[-1].end,
name, templated_types, modifiers,
reference, pointer, array))
del name_tokens[:]
i = 0
end = len(tokens)
while i < end:
token = tokens[i]
if token.name == '<':
new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
AddType(self.ToType(new_tokens))
# If there is a comma after the template, we need to consume
# that here; otherwise it becomes part of the name.
i = new_end
reference = pointer = array = False
elif token.name == ',':
AddType([])
reference = pointer = array = False
elif token.name == '*':
pointer = True
elif token.name == '&':
reference = True
elif token.name == '[':
pointer = True
elif token.name == ']':
pass
else:
name_tokens.append(token)
i += 1
if name_tokens:
# No '<' in the tokens, just a simple name and no template.
AddType([])
return result
def DeclarationToParts(self, parts, needs_name_removed):
name = None
default = []
if needs_name_removed:
# Handle default (initial) values properly.
for i, t in enumerate(parts):
if t.name == '=':
default = parts[i+1:]
name = parts[i-1].name
if name == ']' and parts[i-2].name == '[':
name = parts[i-3].name
i -= 1
parts = parts[:i-1]
break
else:
if parts[-1].token_type == tokenize.NAME:
name = parts.pop().name
else:
# TODO(nnorwitz): this is a hack that happens for code like
# Register(Foo<T>); where it thinks this is a function call
# but it's actually a declaration.
name = '???'
modifiers = []
type_name = []
other_tokens = []
templated_types = []
i = 0
end = len(parts)
while i < end:
p = parts[i]
if keywords.IsKeyword(p.name):
modifiers.append(p.name)
elif p.name == '<':
templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
templated_types = self.ToType(templated_tokens)
i = new_end - 1
# Don't add a spurious :: to data members being initialized.
next_index = i + 1
if next_index < end and parts[next_index].name == '::':
i += 1
elif p.name in ('[', ']', '='):
# These are handled elsewhere.
other_tokens.append(p)
elif p.name not in ('*', '&', '>'):
# Ensure that names have a space between them.
if (type_name and type_name[-1].token_type == tokenize.NAME and
p.token_type == tokenize.NAME):
type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
type_name.append(p)
else:
other_tokens.append(p)
i += 1
type_name = ''.join([t.name for t in type_name])
return name, type_name, templated_types, modifiers, default, other_tokens
def ToParameters(self, tokens):
if not tokens:
return []
result = []
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
def AddParameter(end):
if default:
del default[0] # Remove flag.
parts = self.DeclarationToParts(type_modifiers, True)
(name, type_name, templated_types, modifiers,
unused_default, unused_other_tokens) = parts
parameter_type = Type(first_token.start, first_token.end,
type_name, templated_types, modifiers,
reference, pointer, array)
p = Parameter(first_token.start, end, name,
parameter_type, default)
result.append(p)
template_count = 0
for s in tokens:
if not first_token:
first_token = s
if s.name == '<':
template_count += 1
elif s.name == '>':
template_count -= 1
if template_count > 0:
type_modifiers.append(s)
continue
if s.name == ',':
AddParameter(s.start)
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
elif s.name == '*':
pointer = True
elif s.name == '&':
reference = True
elif s.name == '[':
array = True
elif s.name == ']':
pass # Just don't add to type_modifiers.
elif s.name == '=':
# Got a default value. Add any value (None) as a flag.
default.append(None)
elif default:
default.append(s)
else:
type_modifiers.append(s)
AddParameter(tokens[-1].end)
return result
def CreateReturnType(self, return_type_seq):
if not return_type_seq:
return None
start = return_type_seq[0].start
end = return_type_seq[-1].end
_, name, templated_types, modifiers, default, other_tokens = \
self.DeclarationToParts(return_type_seq, False)
names = [n.name for n in other_tokens]
reference = '&' in names
pointer = '*' in names
array = '[' in names
return Type(start, end, name, templated_types, modifiers,
reference, pointer, array)
def GetTemplateIndices(self, names):
# names is a list of strings.
start = names.index('<')
end = len(names) - 1
while end > 0:
if names[end] == '>':
break
end -= 1
return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
def Generate(self):
while 1:
token = self._GetNextToken()
if not token:
break
# Get the next token.
self.current_token = token
# Dispatch on the next token type.
if token.token_type == _INTERNAL_TOKEN:
if token.name == _NAMESPACE_POP:
self.namespace_stack.pop()
continue
try:
result = self._GenerateOne(token)
if result is not None:
yield result
except:
self.HandleError('exception', token)
raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
ref_pointer_name_seq, templated_types, value=None):
reference = '&' in ref_pointer_name_seq
pointer = '*' in ref_pointer_name_seq
array = '[' in ref_pointer_name_seq
var_type = Type(pos_token.start, pos_token.end, type_name,
templated_types, type_modifiers,
reference, pointer, array)
return VariableDeclaration(pos_token.start, pos_token.end,
name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
if token.token_type == tokenize.NAME:
if (keywords.IsKeyword(token.name) and
not keywords.IsBuiltinType(token.name)):
method = getattr(self, 'handle_' + token.name)
return method()
elif token.name == self.in_class_name_only:
# The token name is the same as the class, must be a ctor if
# there is a paren. Otherwise, it's the return type.
# Peek ahead to get the next token to figure out which.
next = self._GetNextToken()
self._AddBackToken(next)
if next.token_type == tokenize.SYNTAX and next.name == '(':
return self._GetMethod([token], FUNCTION_CTOR, None, True)
# Fall through--handle like any other method.
# Handle data or function declaration/definition.
syntax = tokenize.SYNTAX
temp_tokens, last_token = \
self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
temp_tokens.insert(0, token)
if last_token.name == '(':
# If there is an assignment before the paren,
# this is an expression, not a method.
expr = bool([e for e in temp_tokens if e.name == '='])
if expr:
new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.append(last_token)
temp_tokens.extend(new_temp)
last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
if last_token.name == '[':
# Handle array, this isn't a method, unless it's an operator.
# TODO(nnorwitz): keep the size somewhere.
# unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
temp_tokens.append(last_token)
if temp_tokens[-2].name == 'operator':
temp_tokens.append(self._GetNextToken())
else:
temp_tokens2, last_token = \
self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.extend(temp_tokens2)
if last_token.name == ';':
# Handle data, this isn't a method.
parts = self.converter.DeclarationToParts(temp_tokens, True)
(name, type_name, templated_types, modifiers, default,
unused_other_tokens) = parts
t0 = temp_tokens[0]
names = [t.name for t in temp_tokens]
if templated_types:
start, end = self.converter.GetTemplateIndices(names)
names = names[:start] + names[end:]
default = ''.join([t.name for t in default])
return self._CreateVariable(t0, name, type_name, modifiers,
names, templated_types, default)
if last_token.name == '{':
self._AddBackTokens(temp_tokens[1:])
self._AddBackToken(last_token)
method_name = temp_tokens[0].name
method = getattr(self, 'handle_' + method_name, None)
if not method:
# Must be declaring a variable.
# TODO(nnorwitz): handle the declaration.
return None
return method()
return self._GetMethod(temp_tokens, 0, None, False)
elif token.token_type == tokenize.SYNTAX:
if token.name == '~' and self.in_class:
# Must be a dtor (probably not in method body).
token = self._GetNextToken()
# self.in_class can contain A::Name, but the dtor will only
# be Name. Make sure to compare against the right value.
if (token.token_type == tokenize.NAME and
token.name == self.in_class_name_only):
return self._GetMethod([token], FUNCTION_DTOR, None, True)
# TODO(nnorwitz): handle a lot more syntax.
elif token.token_type == tokenize.PREPROCESSOR:
# TODO(nnorwitz): handle more preprocessor directives.
# token starts with a #, so remove it and strip whitespace.
name = token.name[1:].lstrip()
if name.startswith('include'):
# Remove "include".
name = name[7:].strip()
assert name
# Handle #include \<newline> "header-on-second-line.h".
if name.startswith('\\'):
name = name[1:].strip()
assert name[0] in '<"', token
assert name[-1] in '>"', token
system = name[0] == '<'
filename = name[1:-1]
return Include(token.start, token.end, filename, system)
if name.startswith('define'):
# Remove "define".
name = name[6:].strip()
assert name
value = ''
for i, c in enumerate(name):
if c.isspace():
value = name[i:].lstrip()
name = name[:i]
break
return Define(token.start, token.end, name, value)
if name.startswith('if') and name[2:3].isspace():
condition = name[3:].strip()
if condition.startswith('0') or condition.startswith('(0)'):
self._SkipIf0Blocks()
return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
# TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necessary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
count = 1
while 1:
token = self._GetNextToken()
if token.token_type != tokenize.PREPROCESSOR:
continue
name = token.name[1:].lstrip()
if name.startswith('endif'):
count -= 1
if count == 0:
break
elif name.startswith('if'):
count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
if GetNextToken is None:
GetNextToken = self._GetNextToken
# Assumes the current token is open_paren and we will consume
# and return up to the close_paren.
count = 1
token = GetNextToken()
while 1:
if token.token_type == tokenize.SYNTAX:
if token.name == open_paren:
count += 1
elif token.name == close_paren:
count -= 1
if count == 0:
break
yield token
token = GetNextToken()
yield token
def _GetParameters(self):
return self._GetMatchingChar('(', ')')
def GetScope(self):
return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
if token.whence == tokenize.WHENCE_STREAM:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue.insert(0, token)
else:
assert token.whence == tokenize.WHENCE_QUEUE, token
self.token_queue.append(token)
def _AddBackTokens(self, tokens):
if tokens:
if tokens[-1].whence == tokenize.WHENCE_STREAM:
for token in tokens:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue[:0] = reversed(tokens)
else:
assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
"""Returns ([tokens], next_token_info)."""
GetNextToken = self._GetNextToken
if seq is not None:
it = iter(seq)
GetNextToken = lambda: next(it)
next_token = GetNextToken()
tokens = []
last_token_was_name = False
while (next_token.token_type == tokenize.NAME or
(next_token.token_type == tokenize.SYNTAX and
next_token.name in ('::', '<'))):
# Two NAMEs in a row means the identifier should terminate.
# It's probably some sort of variable declaration.
if last_token_was_name and next_token.token_type == tokenize.NAME:
break
last_token_was_name = next_token.token_type == tokenize.NAME
tokens.append(next_token)
# Handle templated names.
if next_token.name == '<':
tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
last_token_was_name = True
next_token = GetNextToken()
return tokens, next_token
def GetMethod(self, modifiers, templated_types):
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
assert len(return_type_and_name) >= 1
return self._GetMethod(return_type_and_name, modifiers, templated_types,
False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
get_paren):
template_portion = None
if get_paren:
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
if token.name == '<':
# Handle templatized dtors.
template_portion = [token]
template_portion.extend(self._GetMatchingChar('<', '>'))
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '(', token
name = return_type_and_name.pop()
# Handle templatized ctors.
if name.name == '>':
index = 1
while return_type_and_name[index].name != '<':
index += 1
template_portion = return_type_and_name[index:] + [name]
del return_type_and_name[index:]
name = return_type_and_name.pop()
elif name.name == ']':
rt = return_type_and_name
assert rt[-1].name == '[', return_type_and_name
assert rt[-2].name == 'operator', return_type_and_name
name_seq = return_type_and_name[-2:]
del return_type_and_name[-2:]
name = tokenize.Token(tokenize.NAME, 'operator[]',
name_seq[0].start, name.end)
# Get the open paren so _GetParameters() below works.
unused_open_paren = self._GetNextToken()
# TODO(nnorwitz): store template_portion.
return_type = return_type_and_name
indices = name
if return_type:
indices = return_type[0]
# Force ctor for templatized ctors.
if name.name == self.in_class and not modifiers:
modifiers |= FUNCTION_CTOR
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
# Handling operator() is especially weird.
if name.name == 'operator' and not parameters:
token = self._GetNextToken()
assert token.name == '(', token
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
token = self._GetNextToken()
while token.token_type == tokenize.NAME:
modifier_token = token
token = self._GetNextToken()
if modifier_token.name == 'const':
modifiers |= FUNCTION_CONST
elif modifier_token.name == '__attribute__':
# TODO(nnorwitz): handle more __attribute__ details.
modifiers |= FUNCTION_ATTRIBUTE
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'throw':
modifiers |= FUNCTION_THROW
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'override':
modifiers |= FUNCTION_OVERRIDE
elif modifier_token.name == modifier_token.name.upper():
# HACK(nnorwitz): assume that all upper-case names
# are some macro we aren't expanding.
modifiers |= FUNCTION_UNKNOWN_ANNOTATION
else:
self.HandleError('unexpected token', modifier_token)
assert token.token_type == tokenize.SYNTAX, token
# Handle ctor initializers.
if token.name == ':':
# TODO(nnorwitz): anything else to handle for initializer list?
while token.name != ';' and token.name != '{':
token = self._GetNextToken()
# Handle pointer to functions that are really data but look
# like method declarations.
if token.name == '(':
if parameters[0].name == '*':
# name contains the return type.
name = parameters.pop()
# parameters contains the name of the data.
modifiers = [p.name for p in parameters]
# Already at the ( to open the parameter list.
function_parameters = list(self._GetMatchingChar('(', ')'))
del function_parameters[-1] # Remove trailing ')'.
# TODO(nnorwitz): store the function_parameters.
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
return self._CreateVariable(indices, name.name, indices.name,
modifiers, '', None)
# At this point, we got something like:
# return_type (type::*name_)(params);
# This is a data member called name_ that is a function pointer.
# With this code: void (sq_type::*field_)(string&);
# We get: name=void return_type=[] parameters=sq_type ... field_
# TODO(nnorwitz): is return_type always empty?
# TODO(nnorwitz): this isn't even close to being correct.
# Just put in something so we don't crash and can move on.
real_name = parameters[-1]
modifiers = [p.name for p in self._GetParameters()]
del modifiers[-1] # Remove trailing ')'.
return self._CreateVariable(indices, real_name.name, indices.name,
modifiers, '', None)
if token.name == '{':
body = list(self.GetScope())
del body[-1] # Remove trailing '}'.
else:
body = None
if token.name == '=':
token = self._GetNextToken()
if token.name == 'default' or token.name == 'delete':
# Ignore explicitly defaulted and deleted special members
# in C++11.
token = self._GetNextToken()
else:
# Handle pure-virtual declarations.
assert token.token_type == tokenize.CONSTANT, token
assert token.name == '0', token
modifiers |= FUNCTION_PURE_VIRTUAL
token = self._GetNextToken()
if token.name == '[':
# TODO(nnorwitz): store tokens and improve parsing.
# template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
tokens = list(self._GetMatchingChar('[', ']'))
token = self._GetNextToken()
assert token.name == ';', (token, return_type_and_name, parameters)
# Looks like we got a method, not a function.
if len(return_type) > 2 and return_type[-1].name == '::':
return_type, in_class = \
self._GetReturnTypeAndClassName(return_type)
return Method(indices.start, indices.end, name.name, in_class,
return_type, parameters, modifiers, templated_types,
body, self.namespace_stack)
return Function(indices.start, indices.end, name.name, return_type,
parameters, modifiers, templated_types, body,
self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
# Splitting the return type from the class name in a method
# can be tricky. For example, Return::Type::Is::Hard::To::Find().
# Where is the return type and where is the class name?
# The heuristic used is to pull the last name as the class name.
# This includes all the templated type info.
# TODO(nnorwitz): if there is only One name like in the
# example above, punt and assume the last bit is the class name.
# Ignore a :: prefix, if exists so we can find the first real name.
i = 0
if token_seq[0].name == '::':
i = 1
# Ignore a :: suffix, if exists.
end = len(token_seq) - 1
if token_seq[end-1].name == '::':
end -= 1
# Make a copy of the sequence so we can append a sentinel
# value. This is required because GetName needs some
# terminating condition beyond the last name.
seq_copy = token_seq[i:end]
seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
names = []
while i < end:
# Iterate through the sequence parsing out each name.
new_name, next = self.GetName(seq_copy[i:])
assert new_name, 'Got empty new_name, next=%s' % next
# We got a pointer or ref. Add it to the name.
if next and next.token_type == tokenize.SYNTAX:
new_name.append(next)
names.append(new_name)
i += len(new_name)
# Now that we have the names, it's time to undo what we did.
# Remove the sentinel value.
names[-1].pop()
# Flatten the token sequence for the return type.
return_type = [e for seq in names[:-1] for e in seq]
# The class name is the last name.
class_name = names[-1]
return return_type, class_name
def handle_bool(self):
pass
def handle_char(self):
pass
def handle_int(self):
pass
def handle_long(self):
pass
def handle_short(self):
pass
def handle_double(self):
pass
def handle_float(self):
pass
def handle_void(self):
pass
def handle_wchar_t(self):
pass
def handle_unsigned(self):
pass
def handle_signed(self):
pass
def _GetNestedType(self, ctor):
name = None
name_tokens, token = self.GetName()
if name_tokens:
name = ''.join([t.name for t in name_tokens])
# Handle forward declarations.
if token.token_type == tokenize.SYNTAX and token.name == ';':
return ctor(token.start, token.end, name, None,
self.namespace_stack)
if token.token_type == tokenize.NAME and self._handling_typedef:
self._AddBackToken(token)
return ctor(token.start, token.end, name, None,
self.namespace_stack)
# Must be the type declaration.
fields = list(self._GetMatchingChar('{', '}'))
del fields[-1] # Remove trailing '}'.
if token.token_type == tokenize.SYNTAX and token.name == '{':
next = self._GetNextToken()
new_type = ctor(token.start, token.end, name, fields,
self.namespace_stack)
# A name means this is an anonymous type and the name
# is the variable declaration.
if next.token_type != tokenize.NAME:
return new_type
name = new_type
token = next
# Must be variable declaration using the type prefixed with keyword.
assert token.token_type == tokenize.NAME, token
return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
# Special case the handling typedef/aliasing of structs here.
# It would be a pain to handle in the class code.
name_tokens, var_token = self.GetName()
if name_tokens:
next_token = self._GetNextToken()
is_syntax = (var_token.token_type == tokenize.SYNTAX and
var_token.name[0] in '*&')
is_variable = (var_token.token_type == tokenize.NAME and
next_token.name == ';')
variable = var_token
if is_syntax and not is_variable:
variable = next_token
temp = self._GetNextToken()
if temp.token_type == tokenize.SYNTAX and temp.name == '(':
# Handle methods declared to return a struct.
t0 = name_tokens[0]
struct = tokenize.Token(tokenize.NAME, 'struct',
t0.start-7, t0.start-2)
type_and_name = [struct]
type_and_name.extend(name_tokens)
type_and_name.extend((var_token, next_token))
return self._GetMethod(type_and_name, 0, None, False)
assert temp.name == ';', (temp, name_tokens, var_token)
if is_syntax or (is_variable and not self._handling_typedef):
modifiers = ['struct']
type_name = ''.join([t.name for t in name_tokens])
position = name_tokens[0]
return self._CreateVariable(position, variable.name, type_name,
modifiers, var_token.name, None)
name_tokens.extend((var_token, next_token))
self._AddBackTokens(name_tokens)
else:
self._AddBackToken(var_token)
return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
return self._GetNestedType(Union)
def handle_enum(self):
return self._GetNestedType(Enum)
def handle_auto(self):
# TODO(nnorwitz): warn about using auto? Probably not since it
# will be reclaimed and useful for C++0x.
pass
def handle_register(self):
pass
def handle_const(self):
pass
def handle_inline(self):
pass
def handle_extern(self):
pass
def handle_static(self):
pass
def handle_virtual(self):
# What follows must be a method.
token = token2 = self._GetNextToken()
if token.name == 'inline':
# HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
token2 = self._GetNextToken()
if token2.token_type == tokenize.SYNTAX and token2.name == '~':
return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
assert token.token_type == tokenize.NAME or token.name == '::', token
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(') # )
return_type_and_name.insert(0, token)
if token2 is not token:
return_type_and_name.insert(1, token2)
return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
None, False)
def handle_volatile(self):
pass
def handle_mutable(self):
pass
def handle_public(self):
assert self.in_class
self.visibility = VISIBILITY_PUBLIC
def handle_protected(self):
assert self.in_class
self.visibility = VISIBILITY_PROTECTED
def handle_private(self):
assert self.in_class
self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
t0 = tokens[0]
return Friend(t0.start, t0.end, tokens, self.namespace_stack)
def handle_static_cast(self):
pass
def handle_const_cast(self):
pass
def handle_dynamic_cast(self):
pass
def handle_reinterpret_cast(self):
pass
def handle_new(self):
pass
def handle_delete(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Delete(tokens[0].start, tokens[0].end, tokens)
def handle_typedef(self):
token = self._GetNextToken()
if (token.token_type == tokenize.NAME and
keywords.IsKeyword(token.name)):
# Token must be struct/enum/union/class.
method = getattr(self, 'handle_' + token.name)
self._handling_typedef = True
tokens = [method()]
self._handling_typedef = False
else:
tokens = [token]
# Get the remainder of the typedef up to the semi-colon.
tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))
# TODO(nnorwitz): clean all this up.
assert tokens
name = tokens.pop()
indices = name
if tokens:
indices = tokens[0]
if not indices:
indices = token
if name.name == ')':
# HACK(nnorwitz): Handle pointers to functions "properly".
if (len(tokens) >= 4 and
tokens[1].name == '(' and tokens[2].name == '*'):
tokens.append(name)
name = tokens[3]
elif name.name == ']':
# HACK(nnorwitz): Handle arrays properly.
if len(tokens) >= 2:
tokens.append(name)
name = tokens[1]
new_type = tokens
if tokens and isinstance(tokens[0], tokenize.Token):
new_type = self.converter.ToType(tokens)[0]
return Typedef(indices.start, indices.end, name.name,
new_type, self.namespace_stack)
def handle_typeid(self):
pass # Not needed yet.
def handle_typename(self):
pass # Not needed yet.
def _GetTemplatedTypes(self):
result = {}
tokens = list(self._GetMatchingChar('<', '>'))
len_tokens = len(tokens) - 1 # Ignore trailing '>'.
i = 0
while i < len_tokens:
key = tokens[i].name
i += 1
if keywords.IsKeyword(key) or key == ',':
continue
type_name = default = None
if i < len_tokens:
i += 1
if tokens[i-1].name == '=':
assert i < len_tokens, '%s %s' % (i, tokens)
default, unused_next_token = self.GetName(tokens[i:])
i += len(default)
else:
if tokens[i-1].name != ',':
# We got something like: Type variable.
# Re-adjust the key (variable) and type_name (Type).
key = tokens[i-1].name
type_name = tokens[i-2]
result[key] = (type_name, default)
return result
def handle_template(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '<', token
templated_types = self._GetTemplatedTypes()
# TODO(nnorwitz): for now, just ignore the template params.
token = self._GetNextToken()
if token.token_type == tokenize.NAME:
if token.name == 'class':
return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
elif token.name == 'struct':
return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
elif token.name == 'friend':
return self.handle_friend()
self._AddBackToken(token)
tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
tokens.append(last)
self._AddBackTokens(tokens)
if last.name == '(':
return self.GetMethod(FUNCTION_NONE, templated_types)
# Must be a variable definition.
return None
def handle_true(self):
pass # Nothing to do.
def handle_false(self):
pass # Nothing to do.
def handle_asm(self):
pass # Not needed yet.
def handle_class(self):
return self._GetClass(Class, VISIBILITY_PRIVATE, None)
def _GetBases(self):
# Get base classes.
bases = []
while 1:
token = self._GetNextToken()
assert token.token_type == tokenize.NAME, token
# TODO(nnorwitz): store kind of inheritance...maybe.
if token.name not in ('public', 'protected', 'private'):
# If inheritance type is not specified, it is private.
# Just put the token back so we can form a name.
# TODO(nnorwitz): it would be good to warn about this.
self._AddBackToken(token)
else:
# Check for virtual inheritance.
token = self._GetNextToken()
if token.name != 'virtual':
self._AddBackToken(token)
else:
# TODO(nnorwitz): store that we got virtual for this base.
pass
base, next_token = self.GetName()
bases_ast = self.converter.ToType(base)
assert len(bases_ast) == 1, bases_ast
bases.append(bases_ast[0])
assert next_token.token_type == tokenize.SYNTAX, next_token
if next_token.name == '{':
token = next_token
break
# Support multiple inheritance.
assert next_token.name == ',', next_token
return bases, token
def _GetClass(self, class_type, visibility, templated_types):
class_name = None
class_token = self._GetNextToken()
if class_token.token_type != tokenize.NAME:
assert class_token.token_type == tokenize.SYNTAX, class_token
token = class_token
else:
# Skip any macro (e.g. storage class specifiers) after the
# 'class' keyword.
next_token = self._GetNextToken()
if next_token.token_type == tokenize.NAME:
self._AddBackToken(next_token)
else:
self._AddBackTokens([class_token, next_token])
name_tokens, token = self.GetName()
class_name = ''.join([t.name for t in name_tokens])
bases = None
if token.token_type == tokenize.SYNTAX:
if token.name == ';':
# Forward declaration.
return class_type(class_token.start, class_token.end,
class_name, None, templated_types, None,
self.namespace_stack)
if token.name in '*&':
# Inline forward declaration. Could be method or data.
name_token = self._GetNextToken()
next_token = self._GetNextToken()
if next_token.name == ';':
# Handle data
modifiers = ['class']
return self._CreateVariable(class_token, name_token.name,
class_name,
modifiers, token.name, None)
else:
# Assume this is a method.
tokens = (class_token, token, name_token, next_token)
self._AddBackTokens(tokens)
return self.GetMethod(FUNCTION_NONE, None)
if token.name == ':':
bases, token = self._GetBases()
body = None
if token.token_type == tokenize.SYNTAX and token.name == '{':
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '{', token
ast = AstBuilder(self.GetScope(), self.filename, class_name,
visibility, self.namespace_stack)
body = list(ast.Generate())
if not self._handling_typedef:
token = self._GetNextToken()
if token.token_type != tokenize.NAME:
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
else:
new_class = class_type(class_token.start, class_token.end,
class_name, bases, None,
body, self.namespace_stack)
modifiers = []
return self._CreateVariable(class_token,
token.name, new_class,
modifiers, token.name, None)
else:
if not self._handling_typedef:
self.HandleError('non-typedef token', token)
self._AddBackToken(token)
return class_type(class_token.start, class_token.end, class_name,
bases, templated_types, body, self.namespace_stack)
def handle_namespace(self):
token = self._GetNextToken()
# Support anonymous namespaces.
name = None
if token.token_type == tokenize.NAME:
name = token.name
token = self._GetNextToken()
self.namespace_stack.append(name)
assert token.token_type == tokenize.SYNTAX, token
# Create an internal token that denotes when the namespace is complete.
internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
None, None)
internal_token.whence = token.whence
if token.name == '=':
# TODO(nnorwitz): handle aliasing namespaces.
name, next_token = self.GetName()
assert next_token.name == ';', next_token
self._AddBackToken(internal_token)
else:
assert token.name == '{', token
tokens = list(self.GetScope())
# Replace the trailing } with the internal namespace pop token.
tokens[-1] = internal_token
# Handle namespace with nothing in it.
self._AddBackTokens(tokens)
return None
def handle_using(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Using(tokens[0].start, tokens[0].end, tokens)
def handle_explicit(self):
assert self.in_class
# Nothing much to do.
# TODO(nnorwitz): maybe verify the method name == class name.
# This must be a ctor.
return self.GetMethod(FUNCTION_CTOR, None)
def handle_this(self):
pass # Nothing to do.
def handle_operator(self):
# Pull off the next token(s?) and make that part of the method name.
pass
def handle_sizeof(self):
pass
def handle_case(self):
pass
def handle_switch(self):
pass
def handle_default(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX
assert token.name == ':'
def handle_if(self):
pass
def handle_else(self):
pass
def handle_return(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
if not tokens:
return Return(self.current_token.start, self.current_token.end, None)
return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert len(tokens) == 1, str(tokens)
return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
def handle_try(self):
pass # Not needed yet.
def handle_catch(self):
pass # Not needed yet.
def handle_throw(self):
pass # Not needed yet.
def handle_while(self):
pass
def handle_do(self):
pass
def handle_for(self):
pass
def handle_break(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def handle_continue(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
"""Utility method that returns an AstBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
AstBuilder
"""
return AstBuilder(tokenize.GetTokens(source), filename)
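# Minimal usage sketch (inline source string; any parseable C++ works):
#   builder = BuilderFromSource('class Foo {};', '<string>')
#   nodes = list(builder.Generate())  # e.g. one Class node for Foo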
def PrintIndentifiers(filename, should_print):
"""Prints all identifiers for a C++ source file.
Args:
filename: 'file1'
should_print: predicate with signature: bool Function(token)
"""
source = utils.ReadFile(filename, False)
if source is None:
sys.stderr.write('Unable to find: %s\n' % filename)
return
#print('Processing %s' % actual_filename)
builder = BuilderFromSource(source, filename)
try:
for node in builder.Generate():
if should_print(node):
print(node.name)
except KeyboardInterrupt:
return
except:
pass
def PrintAllIndentifiers(filenames, should_print):
"""Prints all identifiers for each C++ source file in filenames.
Args:
filenames: ['file1', 'file2', ...]
should_print: predicate with signature: bool Function(token)
"""
for path in filenames:
PrintIndentifiers(path, should_print)
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print('Processing %s' % filename)
builder = BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
except KeyboardInterrupt:
return
except:
# Already printed a warning, print the traceback and continue.
traceback.print_exc()
else:
if utils.DEBUG:
for ast in entire_ast:
print(ast)
if __name__ == '__main__':
main(sys.argv)
| mit |
loopCM/chromium | build/gyp_helper.py | 57 | 1598 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file helps gyp_chromium and landmines correctly set up the gyp
# environment from chromium.gyp_env on disk
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.dirname(SCRIPT_DIR)
def apply_gyp_environment_from_file(file_path):
"""Reads in a *.gyp_env file and applies the valid keys to os.environ."""
if not os.path.exists(file_path):
return
with open(file_path, 'rU') as f:
file_contents = f.read()
try:
file_data = eval(file_contents, {'__builtins__': None}, None)
except SyntaxError, e:
e.filename = os.path.abspath(file_path)
raise
supported_vars = (
'CC',
'CC_wrapper',
'CHROMIUM_GYP_FILE',
'CHROMIUM_GYP_SYNTAX_CHECK',
'CXX',
'CXX_wrapper',
'GYP_DEFINES',
'GYP_GENERATOR_FLAGS',
'GYP_CROSSCOMPILE',
'GYP_GENERATOR_OUTPUT',
'GYP_GENERATORS',
)
for var in supported_vars:
file_val = file_data.get(var)
if file_val:
if var in os.environ:
print 'INFO: Environment value for "%s" overrides value in %s.' % (
var, os.path.abspath(file_path)
)
else:
os.environ[var] = file_val
def apply_chromium_gyp_env():
if 'SKIP_CHROMIUM_GYP_ENV' not in os.environ:
# Update the environment based on chromium.gyp_env
path = os.path.join(os.path.dirname(CHROME_SRC), 'chromium.gyp_env')
apply_gyp_environment_from_file(path)
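# Example (assumed) chromium.gyp_env contents -- a Python dict literal that
# apply_gyp_environment_from_file() eval()s with builtins disabled:
#
#   { 'GYP_DEFINES': 'component=shared_library', 'GYP_GENERATORS': 'ninja' }
#
# Keys already present in os.environ win over values from the file, as the
# loop above shows.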
| bsd-3-clause |
plamut/superdesk | server/apps/legal_archive/service.py | 4 | 6656 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from eve.versioning import versioned_id_field
from flask import g, current_app as app
from eve.utils import config, ParsedRequest
from .resource import LEGAL_ARCHIVE_NAME
from superdesk import Service, get_resource_privileges
from superdesk.errors import SuperdeskApiError
from superdesk.metadata.item import ITEM_TYPE, GUID_FIELD, CONTENT_TYPE
from superdesk.metadata.packages import GROUPS, RESIDREF, REFS
from superdesk.utils import ListCursor
logger = logging.getLogger(__name__)
class LegalService(Service):
"""
Base Service Class for Legal Archive related services
"""
def on_create(self, docs):
"""
        Overriding to replace the location of each item in the package with the legal archive instead of the
        archive, if the doc is a package.
"""
super().on_create(docs)
for doc in docs:
if ITEM_TYPE in doc:
doc.setdefault(config.ID_FIELD, doc[GUID_FIELD])
if doc[ITEM_TYPE] == CONTENT_TYPE.COMPOSITE:
self._change_location_of_items_in_package(doc)
def on_replace(self, document, original):
"""
        Overriding to replace the location of each item in the package with the legal archive instead of the
        archive, if the doc is a package.
"""
super().on_replace(document, original)
if document.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE:
self._change_location_of_items_in_package(document)
def get(self, req, lookup):
"""
Overriding to check if user is authorized to perform get operation on Legal Archive resources. If authorized
then request is forwarded otherwise throws forbidden error.
:return: list of docs matching query in req and lookup
:raises: SuperdeskApiError.forbiddenError() if user is unauthorized to access the Legal Archive resources.
"""
self.check_get_access_privilege()
return super().get(req, lookup)
def find_one(self, req, **lookup):
"""
Overriding to check if user is authorized to perform get operation on Legal Archive resources. If authorized
then request is forwarded otherwise throws forbidden error.
:return: doc if there is one matching the query in req and lookup
:raises: SuperdeskApiError.forbiddenError() if user is unauthorized to access the Legal Archive resources.
"""
self.check_get_access_privilege()
return super().find_one(req, **lookup)
def check_get_access_privilege(self):
"""
Checks if user is authorized to perform get operation on Legal Archive resources. If authorized then request is
forwarded otherwise throws forbidden error.
:raises: SuperdeskApiError.forbiddenError() if user is unauthorized to access the Legal Archive resources.
"""
if not hasattr(g, 'user'):
return
privileges = g.user.get('active_privileges', {})
resource_privileges = get_resource_privileges(self.datasource).get('GET', None)
if privileges.get(resource_privileges, 0) == 0:
raise SuperdeskApiError.forbiddenError()
def enhance(self, legal_archive_docs):
"""
Enhances the item in Legal Archive Service
:param legal_archive_docs:
"""
if isinstance(legal_archive_docs, list):
for legal_archive_doc in legal_archive_docs:
legal_archive_doc['_type'] = LEGAL_ARCHIVE_NAME
else:
legal_archive_docs['_type'] = LEGAL_ARCHIVE_NAME
def _change_location_of_items_in_package(self, package):
"""
Changes location of each item in the package to legal archive instead of archive.
"""
for group in package.get(GROUPS, []):
for ref in group.get(REFS, []):
if RESIDREF in ref:
ref['location'] = LEGAL_ARCHIVE_NAME
class LegalArchiveService(LegalService):
def on_fetched(self, docs):
"""
Overriding this to enhance the published article with the one in archive collection
"""
self.enhance(docs[config.ITEMS])
def on_fetched_item(self, doc):
"""
Overriding this to enhance the published article with the one in archive collection
"""
self.enhance(doc)
class LegalPublishQueueService(LegalService):
def create(self, docs, **kwargs):
"""
        Overriding this to prevent inserting the transmission details twice. This happens when an item in a package
        expires at a later point in time: the call to insert transmission details is made once when the package
        expires and again when the item itself expires.
"""
ids = []
for doc in docs:
doc_if_exists = self.find_one(req=None, _id=doc['_id'])
if doc_if_exists is None:
ids.extend(super().create([doc]))
return ids
class LegalArchiveVersionsService(LegalService):
def create(self, docs, **kwargs):
"""
        Overriding this to prevent inserting the same version twice. This happens when an item is published more than once.
"""
ids = []
for doc in docs:
doc_if_exists = None
if config.ID_FIELD in doc: # This happens when inserting docs from pre-populate command
doc_if_exists = self.find_one(req=None, _id=doc['_id'])
if doc_if_exists is None:
ids.extend(super().create([doc]))
return ids
def get(self, req, lookup):
"""
        Versions of an article in the Legal Archive aren't maintained by Eve. Overriding this to fetch the version history.
"""
resource_def = app.config['DOMAIN'][LEGAL_ARCHIVE_NAME]
id_field = versioned_id_field(resource_def)
if req and req.args and req.args.get(config.ID_FIELD):
version_history = list(super().get_from_mongo(req=ParsedRequest(),
lookup={id_field: req.args.get(config.ID_FIELD)}))
else:
version_history = list(super().get_from_mongo(req=req, lookup=lookup))
for doc in version_history:
doc[config.ID_FIELD] = doc[id_field]
self.enhance(doc)
return ListCursor(version_history)
| agpl-3.0 |
dzz007/photivo | scons-local-2.2.0/SCons/Tool/mwld.py | 14 | 3666 | """SCons.Tool.mwld
Tool-specific initialization for the Metrowerks CodeWarrior linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwld.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Action
import SCons.Errors
import SCons.Tool
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['AR'] = 'mwld'
env['ARCOM'] = '$AR $ARFLAGS -library -o $TARGET $SOURCES'
env['LIBDIRPREFIX'] = '-L'
env['LIBDIRSUFFIX'] = ''
env['LIBLINKPREFIX'] = '-l'
env['LIBLINKSUFFIX'] = '.lib'
env['LINK'] = 'mwld'
env['LINKCOM'] = '$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = '$LINKFLAGS'
env['SHLINKCOM'] = shlib_action
    env['SHLIBEMITTER'] = shlib_emitter
def exists(env):
import SCons.Tool.mwcc
return SCons.Tool.mwcc.set_vars(env)
def shlib_generator(target, source, env, for_signature):
cmd = ['$SHLINK', '$SHLINKFLAGS', '-shared']
no_import_lib = env.get('no_import_lib', 0)
    if no_import_lib: cmd.append('-noimplib')  # a single flag; extend() would add it character by character
dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
if dll: cmd.extend(['-o', dll])
implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
if implib: cmd.extend(['-implib', implib.get_string(for_signature)])
cmd.extend(['$SOURCES', '$_LIBDIRFLAGS', '$_LIBFLAGS'])
return [cmd]
def shlib_emitter(target, source, env):
dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX')
no_import_lib = env.get('no_import_lib', 0)
if not dll:
raise SCons.Errors.UserError("A shared library should have exactly one target with the suffix: %s" % env.subst("$SHLIBSUFFIX"))
if not no_import_lib and \
not env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX'):
# Append an import library to the list of targets.
target.append(env.ReplaceIxes(dll,
'SHLIBPREFIX', 'SHLIBSUFFIX',
'LIBPREFIX', 'LIBSUFFIX'))
return target, source
shlib_action = SCons.Action.Action(shlib_generator, generator=1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
Lektorium-LLC/edx-platform | openedx/core/lib/token_utils.py | 11 | 4236 | """Utilities for working with ID tokens."""
import json
from time import time
from Cryptodome.PublicKey import RSA
from django.conf import settings
from django.utils.functional import cached_property
from jwkest.jwk import KEYS, RSAKey
from jwkest.jws import JWS
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from student.models import UserProfile, anonymous_id_for_user
class JwtBuilder(object):
"""Utility for building JWTs.
Unifies diverse approaches to JWT creation in a single class. This utility defaults to using the system's
JWT configuration.
NOTE: This utility class will allow you to override the signing key and audience claim to support those
clients which still require this. This approach to JWT creation is DEPRECATED. Avoid doing this for new clients.
Arguments:
user (User): User for which to generate the JWT.
Keyword Arguments:
asymmetric (Boolean): Whether the JWT should be signed with this app's private key.
secret (string): Overrides configured JWT secret (signing) key. Unused if an asymmetric signature is requested.
"""
def __init__(self, user, asymmetric=False, secret=None):
self.user = user
self.asymmetric = asymmetric
self.secret = secret
self.jwt_auth = configuration_helpers.get_value('JWT_AUTH', settings.JWT_AUTH)
def build_token(self, scopes, expires_in=None, aud=None, additional_claims=None):
"""Returns a JWT access token.
Arguments:
scopes (list): Scopes controlling which optional claims are included in the token.
Keyword Arguments:
expires_in (int): Time to token expiry, specified in seconds.
aud (string): Overrides configured JWT audience claim.
additional_claims (dict): Additional claims to include in the token.
Returns:
str: Encoded JWT
"""
now = int(time())
expires_in = expires_in or self.jwt_auth['JWT_EXPIRATION']
payload = {
# TODO Consider getting rid of this claim since we don't use it.
'aud': aud if aud else self.jwt_auth['JWT_AUDIENCE'],
'exp': now + expires_in,
'iat': now,
'iss': self.jwt_auth['JWT_ISSUER'],
'preferred_username': self.user.username,
'scopes': scopes,
'sub': anonymous_id_for_user(self.user, None),
}
if additional_claims:
payload.update(additional_claims)
for scope in scopes:
handler = self.claim_handlers.get(scope)
if handler:
handler(payload)
return self.encode(payload)
@cached_property
def claim_handlers(self):
"""Returns a dictionary mapping scopes to methods that will add claims to the JWT payload."""
return {
'email': self.attach_email_claim,
'profile': self.attach_profile_claim
}
def attach_email_claim(self, payload):
"""Add the email claim details to the JWT payload."""
payload['email'] = self.user.email
def attach_profile_claim(self, payload):
"""Add the profile claim details to the JWT payload."""
try:
# Some users (e.g., service users) may not have user profiles.
name = UserProfile.objects.get(user=self.user).name
except UserProfile.DoesNotExist:
name = None
payload.update({
'name': name,
'family_name': self.user.last_name,
'given_name': self.user.first_name,
'administrator': self.user.is_staff,
})
def encode(self, payload):
"""Encode the provided payload."""
keys = KEYS()
if self.asymmetric:
keys.add(RSAKey(key=RSA.importKey(settings.JWT_PRIVATE_SIGNING_KEY)))
algorithm = 'RS512'
else:
key = self.secret if self.secret else self.jwt_auth['JWT_SECRET_KEY']
keys.add({'key': key, 'kty': 'oct'})
algorithm = self.jwt_auth['JWT_ALGORITHM']
data = json.dumps(payload)
jws = JWS(data, alg=algorithm)
return jws.sign_compact(keys=keys)
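# A minimal usage sketch (the user object, scopes, and lifetime are
# placeholders; signing relies on the JWT_AUTH settings read in __init__):
#
#   builder = JwtBuilder(user)
#   token = builder.build_token(['email', 'profile'], expires_in=300)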
| agpl-3.0 |
8u1a/plaso | plaso/parsers/java_idx.py | 3 | 8539 | # -*- coding: utf-8 -*-
"""Parser for Java Cache IDX files."""
# TODO:
# * 6.02 files did not retain IP addresses. However, the
# deploy_resource_codebase header field may contain the host IP.
# This needs to be researched further, as that field may not always
# be present. 6.02 files will currently return 'Unknown'.
import os
import construct
from plaso.events import time_events
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import interface
from plaso.parsers import manager
class JavaIDXEvent(time_events.TimestampEvent):
"""Convenience class for a Java IDX cache file download event."""
DATA_TYPE = u'java:download:idx'
def __init__(
self, timestamp, timestamp_description, idx_version, url, ip_address):
"""Initializes the event object.
Args:
timestamp: The timestamp value.
timestamp_description: The description of the usage of the time value.
idx_version: Version of IDX file.
url: URL of the downloaded file.
ip_address: IP address of the host in the URL.
"""
super(JavaIDXEvent, self).__init__(timestamp, timestamp_description)
self.idx_version = idx_version
self.url = url
self.ip_address = ip_address
class JavaIDXParser(interface.SingleFileBaseParser):
"""Parse Java WebStart Cache IDX files for download events.
There are five structures defined. 6.02 files had one generic section
that retained all data. From 6.03, the file went to a multi-section
format where later sections were optional and had variable-lengths.
6.03, 6.04, and 6.05 files all have their main data section (#2)
begin at offset 128. The short structure is because 6.05 files
deviate after the 8th byte. So, grab the first 8 bytes to ensure it's
valid, get the file version, then continue on with the correct
structures.
"""
_INITIAL_FILE_OFFSET = None
NAME = u'java_idx'
DESCRIPTION = u'Parser for Java WebStart Cache IDX files.'
IDX_SHORT_STRUCT = construct.Struct(
u'magic',
construct.UBInt8(u'busy'),
construct.UBInt8(u'incomplete'),
construct.UBInt32(u'idx_version'))
IDX_602_STRUCT = construct.Struct(
u'IDX_602_Full',
construct.UBInt16(u'null_space'),
construct.UBInt8(u'shortcut'),
construct.UBInt32(u'content_length'),
construct.UBInt64(u'last_modified_date'),
construct.UBInt64(u'expiration_date'),
construct.PascalString(
u'version_string', length_field=construct.UBInt16(u'length')),
construct.PascalString(
u'url', length_field=construct.UBInt16(u'length')),
construct.PascalString(
u'namespace', length_field=construct.UBInt16(u'length')),
construct.UBInt32(u'FieldCount'))
IDX_605_SECTION_ONE_STRUCT = construct.Struct(
u'IDX_605_Section1',
construct.UBInt8(u'shortcut'),
construct.UBInt32(u'content_length'),
construct.UBInt64(u'last_modified_date'),
construct.UBInt64(u'expiration_date'),
construct.UBInt64(u'validation_date'),
construct.UBInt8(u'signed'),
construct.UBInt32(u'sec2len'),
construct.UBInt32(u'sec3len'),
construct.UBInt32(u'sec4len'))
IDX_605_SECTION_TWO_STRUCT = construct.Struct(
u'IDX_605_Section2',
construct.PascalString(
u'version', length_field=construct.UBInt16(u'length')),
construct.PascalString(
u'url', length_field=construct.UBInt16(u'length')),
construct.PascalString(
u'namespec', length_field=construct.UBInt16(u'length')),
construct.PascalString(
u'ip_address', length_field=construct.UBInt16(u'length')),
construct.UBInt32(u'FieldCount'))
# Java uses Pascal-style strings, but with a 2-byte length field.
JAVA_READUTF_STRING = construct.Struct(
u'Java.ReadUTF',
construct.PascalString(
u'string', length_field=construct.UBInt16(u'length')))
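  # Illustrative only: a Java ReadUTF string is a 2-byte big-endian length
  # followed by that many bytes, so parsing the (hypothetical) bytes
  # b'\x00\x04date' with JAVA_READUTF_STRING yields a container whose
  # .string field is 'date' -- the header field name ParseFileObject() checks.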
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses a Java WebStart Cache IDX file-like object.
Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
file_object: A file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_object.seek(0, os.SEEK_SET)
try:
magic = self.IDX_SHORT_STRUCT.parse_stream(file_object)
except (IOError, construct.FieldError) as exception:
raise errors.UnableToParseFile(
u'Unable to parse Java IDX file with error: {0:s}.'.format(exception))
# Fields magic.busy and magic.incomplete are normally 0x00. They
# are set to 0x01 if the file is currently being downloaded. Logic
# checks for > 1 to avoid a race condition and still reject any
# file with other data.
# Field magic.idx_version is the file version, of which only
# certain versions are supported.
if magic.busy > 1 or magic.incomplete > 1:
raise errors.UnableToParseFile(u'Not a valid Java IDX file')
    if magic.idx_version not in [602, 603, 604, 605]:
raise errors.UnableToParseFile(u'Not a valid Java IDX file')
# Obtain the relevant values from the file. The last modified date
# denotes when the file was last modified on the HOST. For example,
# when the file was uploaded to a web server.
if magic.idx_version == 602:
section_one = self.IDX_602_STRUCT.parse_stream(file_object)
last_modified_date = section_one.last_modified_date
url = section_one.url
ip_address = u'Unknown'
http_header_count = section_one.FieldCount
elif magic.idx_version in [603, 604, 605]:
# IDX 6.03 and 6.04 have two unused bytes before the structure.
if magic.idx_version in [603, 604]:
file_object.read(2)
# IDX 6.03, 6.04, and 6.05 files use the same structures for the
# remaining data.
section_one = self.IDX_605_SECTION_ONE_STRUCT.parse_stream(file_object)
last_modified_date = section_one.last_modified_date
if file_object.get_size() > 128:
file_object.seek(128, os.SEEK_SET) # Static offset for section 2.
section_two = self.IDX_605_SECTION_TWO_STRUCT.parse_stream(file_object)
url = section_two.url
ip_address = section_two.ip_address
http_header_count = section_two.FieldCount
else:
url = u'Unknown'
ip_address = u'Unknown'
http_header_count = 0
# File offset is now just prior to HTTP headers. Make sure there
# are headers, and then parse them to retrieve the download date.
download_date = None
    for _ in range(0, http_header_count):
field = self.JAVA_READUTF_STRING.parse_stream(file_object)
value = self.JAVA_READUTF_STRING.parse_stream(file_object)
if field.string == u'date':
# Time string "should" be in UTC or have an associated time zone
# information in the string itself. If that is not the case then
# there is no reliable method for plaso to determine the proper
# timezone, so the assumption is that it is UTC.
try:
download_date = timelib.Timestamp.FromTimeString(
value.string, gmt_as_timezone=False)
except errors.TimestampError:
download_date = None
parser_mediator.ProduceParseError(
u'Unable to parse time value: {0:s}'.format(value.string))
if not url or not ip_address:
raise errors.UnableToParseFile(
u'Unexpected Error: URL or IP address not found in file.')
last_modified_timestamp = timelib.Timestamp.FromJavaTime(
last_modified_date)
# TODO: Move the timestamp description fields into eventdata.
event_object = JavaIDXEvent(
last_modified_timestamp, u'File Hosted Date', magic.idx_version, url,
ip_address)
parser_mediator.ProduceEvent(event_object)
if section_one:
expiration_date = section_one.get(u'expiration_date', None)
if expiration_date:
expiration_timestamp = timelib.Timestamp.FromJavaTime(expiration_date)
event_object = JavaIDXEvent(
expiration_timestamp, u'File Expiration Date', magic.idx_version,
url, ip_address)
parser_mediator.ProduceEvent(event_object)
if download_date:
event_object = JavaIDXEvent(
download_date, eventdata.EventTimestamp.FILE_DOWNLOADED,
magic.idx_version, url, ip_address)
parser_mediator.ProduceEvent(event_object)
manager.ParsersManager.RegisterParser(JavaIDXParser)
| apache-2.0 |
snyaggarwal/pex | tests/test_environment.py | 5 | 3792 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from contextlib import contextmanager
from twitter.common.contextutil import temporary_dir
from pex.compatibility import nested
from pex.environment import PEXEnvironment
from pex.pex_builder import PEXBuilder
from pex.pex_info import PexInfo
from pex.testing import make_bdist, temporary_filename
@contextmanager
def yield_pex_builder(zip_safe=True):
with nested(temporary_dir(), make_bdist('p1', zipped=True, zip_safe=zip_safe)) as (td, p1):
pb = PEXBuilder(path=td)
pb.add_egg(p1.location)
yield pb
def test_force_local():
with nested(yield_pex_builder(), temporary_dir(), temporary_filename()) as (
pb, pex_root, pex_file):
pb.info.pex_root = pex_root
pb.build(pex_file)
code_cache = PEXEnvironment.force_local(pex_file, pb.info)
assert os.path.exists(pb.info.zip_unsafe_cache)
assert len(os.listdir(pb.info.zip_unsafe_cache)) == 1
assert [os.path.basename(code_cache)] == os.listdir(pb.info.zip_unsafe_cache)
assert set(os.listdir(code_cache)) == set([PexInfo.PATH, '__main__.py', '__main__.pyc'])
# idempotence
assert PEXEnvironment.force_local(pex_file, pb.info) == code_cache
def normalize(path):
return os.path.normpath(os.path.realpath(path)).lower()
def test_write_zipped_internal_cache():
# zip_safe pex will not be written to install cache unless always_write_cache
with nested(yield_pex_builder(zip_safe=True), temporary_dir(), temporary_filename()) as (
pb, pex_root, pex_file):
pb.info.pex_root = pex_root
pb.build(pex_file)
existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info)
assert len(zip_safe) == 1
assert normalize(zip_safe[0].location).startswith(
normalize(os.path.join(pex_file, pb.info.internal_cache))), (
'loc: %s, cache: %s' % (
normalize(zip_safe[0].location),
normalize(os.path.join(pex_file, pb.info.internal_cache))))
pb.info.always_write_cache = True
existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info)
assert len(new) == 1
assert normalize(new[0].location).startswith(normalize(pb.info.install_cache))
# Check that we can read from the cache
existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info)
assert len(existing) == 1
assert normalize(existing[0].location).startswith(normalize(pb.info.install_cache))
# non-zip_safe pex will be written to install cache
with nested(yield_pex_builder(zip_safe=False), temporary_dir(), temporary_filename()) as (
pb, pex_root, pex_file):
pb.info.pex_root = pex_root
pb.build(pex_file)
existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info)
assert len(new) == 1
assert normalize(new[0].location).startswith(normalize(pb.info.install_cache))
original_location = normalize(new[0].location)
# do the second time to validate idempotence of caching
existing, new, zip_safe = PEXEnvironment.write_zipped_internal_cache(pex_file, pb.info)
assert len(existing) == 1
assert normalize(existing[0].location) == original_location
def test_load_internal_cache_unzipped():
# zip_safe pex will not be written to install cache unless always_write_cache
with nested(yield_pex_builder(zip_safe=True), temporary_dir()) as (pb, pex_root):
pb.info.pex_root = pex_root
pb.freeze()
dists = list(PEXEnvironment.load_internal_cache(pb.path(), pb.info))
assert len(dists) == 1
assert normalize(dists[0].location).startswith(
normalize(os.path.join(pb.path(), pb.info.internal_cache)))
| apache-2.0 |
MIPS/external-chromium_org-third_party-skia | tools/test_pdfs.py | 231 | 1801 | '''
Compares the renderings of serialized SkPictures to expected images.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import os
import optparse
import sys
import shutil
import tempfile
import test_rendering
USAGE_STRING = 'Usage: %s input... expectedDir'
HELP_STRING = '''
Takes input SkPicture files and renders them as PDF files, and then compares
those resulting PDF files against PDF files found in expectedDir.
Each instance of "input" can be either a file (name must end in .skp), or a
directory (in which case this script will process all .skp files within the
directory).
'''
def Main(args):
"""Allow other scripts to call this script with fake command-line args.
@param The commandline argument list
"""
parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
parser.add_option('--render_dir', dest='render_dir',
help = ('specify the location to output the rendered '
'files. Default is a temp directory.'))
parser.add_option('--diff_dir', dest='diff_dir',
help = ('specify the location to output the diff files. '
'Default is a temp directory.'))
options, arguments = parser.parse_args(args)
    if len(arguments) < 3:
        print("Expected at least one input and one output folder.")
parser.print_help()
sys.exit(-1)
inputs = arguments[1:-1]
expected_dir = arguments[-1]
test_rendering.TestRenderSkps(inputs, expected_dir, options.render_dir,
options.diff_dir, 'render_pdfs', '')
if __name__ == '__main__':
Main(sys.argv)
| bsd-3-clause |
unix-beard/matasano | set1/detect_single_character_xor/detect_single_character_xor.py | 1 | 1514 | #!/usr/bin/env python3
################################################################################
# The matasano crypto challenges
# http://cryptopals.com/sets/1/challenges/4/
# Set 1 Challenge 4
# Detect single-character XOR
################################################################################
# One of the 60-character strings in the input file has been encrypted
# by single-character XOR. Find it.
# Key: int=53, char='5'
# Message: Now that the party is jumping
#
# NOTE: This implementation is strictly sequential
################################################################################
import sys
import string
def decode_char(key, tuple_):
    return chr(int(tuple_[0] + tuple_[1], base=16) ^ key)
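# Worked example using the key from the header above: with key 53 (0x35), the
# first message character 'N' (0x4e) must have been stored as the hex pair
# '7b', because chr(0x7b ^ 53) == chr(0x4e) == 'N'.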
def decode_with_key(key, s):
decoded_msg = ''
for t in zip(s[0::2], s[1::2]):
        decoded_msg += decode_char(key, t)
if len([c for c in decoded_msg if c in string.ascii_letters + ' \n']) == len(decoded_msg):
print('[*] Trying the key: int: {0}, char: {1}'.format(key, chr(key)))
print('Decoded message: {0}'.format(decoded_msg))
def decode(s):
print('Decoding [{0}]'.format(s))
for key in range(0, 256):
decode_with_key(key, s)
def remove_eol(s):
"""Removes trailing '\n' if there is one"""
return s[0:len(s) - 1] if s[len(s) - 1] == '\n' else s
def main():
with open(sys.argv[1], 'r') as f:
for encoded_str in f:
decode(remove_eol(encoded_str))
if __name__ == '__main__':
main()
| mit |
igabr/Metis_Projects_Chicago_2017 | 03-Project-McNulty/web_app/src/flask-lesscss/docs/conf.py | 6 | 6500 | # -*- coding: utf-8 -*-
#
# flask-lesscss documentation build configuration file, created by
# sphinx-quickstart on Tue May 11 18:54:04 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_themes'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'flask-lesscss'
copyright = u'2010, Steve Losh'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.1'
# The full version, including alpha/beta/rc tags.
release = '0.9.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'flask_small'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'flask-lesscssdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'flask-lesscss.tex', u'flask-lesscss Documentation',
u'Steve Losh', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| mit |
fajoy/nova | nova/api/openstack/urlmap.py | 12 | 10628 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import paste.urlmap
import re
import urllib2
from nova.api.openstack import wsgi
from nova.openstack.common import log as logging
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*'
r'(?:=\s*([^;]+|%s))?\s*' %
(_quoted_string_re, _quoted_string_re))
LOG = logging.getLogger(__name__)
def unquote_header_value(value):
"""Unquotes a header value.
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
return value
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in urllib2.parse_http_list(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_options_header(value):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('Content-Type: text/html; mimetype=text/html')
('Content-Type:', {'mimetype': 'text/html'})
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value)
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = parts.next()[0]
extra = dict(parts)
return name, extra
class Accept(object):
def __init__(self, value):
self._content_types = [parse_options_header(v) for v in
parse_list_header(value)]
def best_match(self, supported_content_types):
# FIXME: Should we have a more sophisticated matching algorithm that
# takes into account the version as well?
best_quality = -1
best_content_type = None
best_params = {}
best_match = '*/*'
for content_type in supported_content_types:
for content_mask, params in self._content_types:
try:
quality = float(params.get('q', 1))
except ValueError:
continue
if quality < best_quality:
continue
elif best_quality == quality:
if best_match.count('*') <= content_mask.count('*'):
continue
if self._match_mask(content_mask, content_type):
best_quality = quality
best_content_type = content_type
best_params = params
best_match = content_mask
return best_content_type, best_params
def content_type_params(self, best_content_type):
"""Find parameters in Accept header for given content type."""
for content_type, params in self._content_types:
if best_content_type == content_type:
return params
return {}
def _match_mask(self, mask, content_type):
if '*' not in mask:
return content_type == mask
if mask == '*/*':
return True
mask_major = mask[:-2]
content_type_major = content_type.split('/', 1)[0]
return content_type_major == mask_major
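    # Illustrative only (hypothetical header value):
    #   accept = Accept('application/json;q=0.8;version=1.1, application/xml;q=0.2')
    #   accept.best_match(['application/json', 'application/xml'])
    # returns ('application/json', {'q': '0.8', 'version': '1.1'}), which is
    # how _accept_strategy() below recovers both the MIME type and the version.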
def urlmap_factory(loader, global_conf, **local_conf):
if 'not_found_app' in local_conf:
not_found_app = local_conf.pop('not_found_app')
else:
not_found_app = global_conf.get('not_found_app')
if not_found_app:
not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
urlmap = URLMap(not_found_app=not_found_app)
for path, app_name in local_conf.items():
path = paste.urlmap.parse_path_expression(path)
app = loader.get_app(app_name, global_conf=global_conf)
urlmap[path] = app
return urlmap
class URLMap(paste.urlmap.URLMap):
def _match(self, host, port, path_info):
"""Find longest match for a given URL path."""
for (domain, app_url), app in self.applications:
if domain and domain != host and domain != host + ':' + port:
continue
if (path_info == app_url
or path_info.startswith(app_url + '/')):
return app, app_url
return None, None
def _set_script_name(self, app, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
return app(environ, start_response)
return wrap
def _munge_path(self, app, path_info, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
environ['PATH_INFO'] = path_info[len(app_url):]
return app(environ, start_response)
return wrap
def _path_strategy(self, host, port, path_info):
"""Check path suffix for MIME type and path prefix for API version."""
mime_type = app = app_url = None
parts = path_info.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in wsgi.SUPPORTED_CONTENT_TYPES:
mime_type = possible_type
parts = path_info.split('/')
if len(parts) > 1:
possible_app, possible_app_url = self._match(host, port, path_info)
# Don't use prefix if it ends up matching default
if possible_app and possible_app_url:
app_url = possible_app_url
app = self._munge_path(possible_app, path_info, app_url)
return mime_type, app, app_url
def _content_type_strategy(self, host, port, environ):
"""Check Content-Type header for API version."""
app = None
params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return app
def _accept_strategy(self, host, port, environ, supported_content_types):
"""Check Accept header for best matching MIME type and API version."""
accept = Accept(environ.get('HTTP_ACCEPT', ''))
app = None
# Find the best match in the Accept header
mime_type, params = accept.best_match(supported_content_types)
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return mime_type, app
def __call__(self, environ, start_response):
host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
if ':' in host:
host, port = host.split(':', 1)
else:
if environ['wsgi.url_scheme'] == 'http':
port = '80'
else:
port = '443'
path_info = environ['PATH_INFO']
path_info = self.normalize_url(path_info, False)[1]
# The MIME type for the response is determined in one of two ways:
# 1) URL path suffix (eg /servers/detail.json)
# 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2)
# The API version is determined in one of three ways:
# 1) URL path prefix (eg /v1.1/tenant/servers/detail)
# 2) Content-Type header (eg application/json;version=1.1)
# 3) Accept header (eg application/json;q=0.8;version=1.1)
supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES)
mime_type, app, app_url = self._path_strategy(host, port, path_info)
# Accept application/atom+xml for the index query of each API
# version mount point as well as the root index
if (app_url and app_url + '/' == path_info) or path_info == '/':
supported_content_types.append('application/atom+xml')
if not app:
app = self._content_type_strategy(host, port, environ)
if not mime_type or not app:
possible_mime_type, possible_app = self._accept_strategy(
host, port, environ, supported_content_types)
if possible_mime_type and not mime_type:
mime_type = possible_mime_type
if possible_app and not app:
app = possible_app
if not mime_type:
mime_type = 'application/json'
if not app:
# Didn't match a particular version, probably matches default
app, app_url = self._match(host, port, path_info)
if app:
app = self._munge_path(app, path_info, app_url)
if app:
environ['nova.best_content_type'] = mime_type
return app(environ, start_response)
environ['paste.urlmap_object'] = self
return self.not_found_application(environ, start_response)
| apache-2.0 |
RaphaelKimmig/django_helpful | django_helpful/__init__.py | 1 | 1416 | # Copyright (c) 2013, Raphael Kimmig
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided
# with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
# AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .utils import *
try:
from .test_runners import *
except ImportError:
pass
| bsd-2-clause |
looker/sdk-examples | python/soft_delete_dashboard.py | 1 | 1367 | import sys
from typing import Sequence
import exceptions
from looker_sdk import client, error, models
sdk = client.setup("../looker.ini")
def main():
"""Given a dashboard title, get the ids of all dashboards with matching titles
and move them to trash.
$ python soft_delete_dashboard.py "An Unused Dashboard"
"""
dashboard_title = sys.argv[1] if len(sys.argv) > 1 else ""
if not dashboard_title:
raise exceptions.ArgumentError("Please provide: <dashboardTitle>")
dashboards = get_dashboards(dashboard_title)
delete_dashboards(dashboards)
def get_dashboards(title: str) -> Sequence[models.Dashboard]:
"""Get dashboards with matching title"""
lc_title = title.lower()
results = sdk.search_dashboards(title=lc_title)
if not results:
raise exceptions.NotFoundError(f'dashboard "{title}" not found')
assert isinstance(results, Sequence)
return results
def delete_dashboards(dashboards: Sequence[models.Dashboard]):
"""Soft delete dashboards"""
for dashboard in dashboards:
try:
assert dashboard.id
sdk.delete_dashboard(dashboard.id)
except error.SDKError:
print(f"Failed to delete dashboard with id {dashboard.id}.")
else:
print(f'"{dashboard.title}" (id {dashboard.id}) has been moved to trash.')
main()
| mit |
cbanta/pjproject | tests/pjsua/mod_recvfrom.py | 39 | 2746 | # $Id$
import imp
import sys
import inc_sip as sip
import inc_const as const
import re
from inc_cfg import *
# Read configuration
cfg_file = imp.load_source("cfg_file", ARGS[1])
# Default server port (should we randomize?)
srv_port = 50070
def test_func(test):
pjsua = test.process[0]
dlg = sip.Dialog("127.0.0.1", pjsua.inst_param.sip_port,
local_port=srv_port,
tcp=cfg_file.recvfrom_cfg.tcp)
last_cseq = 0
last_method = ""
last_call_id = ""
for t in cfg_file.recvfrom_cfg.transaction:
# Print transaction title
if t.title != "":
dlg.trace(t.title)
# Run command and expect patterns
for c in t.cmds:
if c[0] and c[0] != "":
pjsua.send(c[0])
if len(c)>1 and c[1] and c[1] != "":
pjsua.expect(c[1])
# Wait for request
if t.check_cseq:
# Absorbs retransmissions
cseq = 0
method = last_method
call_id = last_call_id
while cseq <= last_cseq and method == last_method and call_id == last_call_id:
request, src_addr = dlg.wait_msg_from(30)
if request==None or request=="":
raise TestError("Timeout waiting for request")
method = request.split(" ", 1)[0]
cseq_hval = sip.get_header(request, "CSeq")
cseq_hval = cseq_hval.split(" ")[0]
cseq = int(cseq_hval)
call_id = sip.get_header(request, "Call-ID")
last_cseq = cseq
last_method = method
else:
request, src_addr = dlg.wait_msg_from(30)
if request==None or request=="":
raise TestError("Timeout waiting for request")
# Check for include patterns
for pat in t.include:
if re.search(pat, request, re.M | re.I)==None:
if t.title:
tname = " in " + t.title + " transaction"
else:
tname = ""
raise TestError("Pattern " + pat + " not found" + tname)
# Check for exclude patterns
for pat in t.exclude:
if re.search(pat, request, re.M | re.I)!=None:
if t.title:
tname = " in " + t.title + " transaction"
else:
tname = ""
raise TestError("Excluded pattern " + pat + " found" + tname)
# Create response
if t.resp_code!=0:
response = dlg.create_response(request, t.resp_code, "Status reason")
# Add headers to response
for h in t.resp_hdr:
response = response + h + "\r\n"
# Add message body if required
if t.body:
response = response + t.body
# Send response
dlg.send_msg(response, src_addr)
# Expect something to happen in pjsua
if t.expect != "":
pjsua.expect(t.expect)
# Sync
pjsua.sync_stdout()
# Replace "$PORT" with server port in pjsua args
cfg_file.recvfrom_cfg.inst_param.arg = cfg_file.recvfrom_cfg.inst_param.arg.replace("$PORT", str(srv_port))
# Here where it all comes together
test = TestParam(cfg_file.recvfrom_cfg.name,
[cfg_file.recvfrom_cfg.inst_param],
test_func)
| gpl-2.0 |
robmcmullen/peppy | peppy/major_modes/fortran_95.py | 1 | 1742 | # peppy Copyright (c) 2006-2009 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""Fortran 95 programming language editing support.
Major mode for editing Fortran 95 files.
Supporting actions and minor modes should go here only if they are uniquely
applicable to this major mode and can't be used in other major modes. If
actions can be used with multiple major modes, they should be put in a
separate plugin in the peppy/plugins directory.
"""
import os
import wx
import wx.stc
from peppy.lib.foldexplorer import *
from peppy.lib.autoindent import *
from peppy.yapsy.plugins import *
from peppy.major import *
from peppy.editra.style_specs import unique_keywords
from peppy.fundamental import FundamentalMode
class Fortran95Mode(FundamentalMode):
"""Stub major mode for editing Fortran 95 files.
This major mode has been automatically generated and is a boilerplate/
placeholder major mode. Enhancements to this mode are appreciated!
"""
keyword = 'Fortran 95'
editra_synonym = 'Fortran 95'
stc_lexer_id = wx.stc.STC_LEX_FORTRAN
start_line_comment = '!'
end_line_comment = ''
icon = 'icons/page_white.png'
default_classprefs = (
StrParam('extensions', 'f2k f90 f95 fpp', fullwidth=True),
StrParam('keyword_set_0', unique_keywords[38], hidden=False, fullwidth=True),
StrParam('keyword_set_1', unique_keywords[39], hidden=False, fullwidth=True),
StrParam('keyword_set_2', unique_keywords[40], hidden=False, fullwidth=True),
)
class Fortran95ModePlugin(IPeppyPlugin):
"""Plugin to register modes and user interface for Fortran 95
"""
def getMajorModes(self):
yield Fortran95Mode
| gpl-2.0 |
HH890612/MiliCloud | lib/requests/api.py | 92 | 5400 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
>>> req = requests.request('GET', 'http://httpbin.org/get')
      >>> req
      <Response [200]>
"""
session = sessions.Session()
response = session.request(method=method, url=url, **kwargs)
# By explicitly closing the session, we avoid leaving sockets open which
# can trigger a ResourceWarning in some cases, and look like a memory leak
# in others.
session.close()
return response
def get(url, params=None, **kwargs):
"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('delete', url, **kwargs)
| mit |
evernym/plenum | plenum/common/script_helper.py | 2 | 6795 | import os
from jsonpickle import json
from plenum.common.constants import CLIENT_STACK_SUFFIX
from plenum.common.roles import Roles
from plenum.common.transactions import PlenumTransactions
from storage.text_file_store import TextFileStore
NodeInfoFile = "node-info"
GenTxnFile = "genesis_txn"
ExportedTxnFile = "exported_genesis_txn"
def buildKeepDirIfNotExists(baseDir):
keepDir = os.path.expanduser(baseDir)
if not os.path.exists(keepDir):
os.makedirs(keepDir, exist_ok=True)
def isNodeType(baseDir, name):
    filepath = os.path.join(os.path.expanduser(baseDir),
                            name + CLIENT_STACK_SUFFIX)
    return os.path.exists(filepath)
def getLedger(baseDir, dbName, storeHash=True, isLineNoKey: bool = False):
return TextFileStore(
dbDir=baseDir,
dbName=dbName,
storeContentHash=storeHash,
isLineNoKey=isLineNoKey)
def storeToFile(baseDir, dbName, value, key,
storeHash=True, isLineNoKey=False):
ledger = getLedger(baseDir, dbName, storeHash=storeHash,
isLineNoKey=isLineNoKey)
if key is None:
ledger.put(value)
else:
ledger.put(value, key)
ledger.close()
def getNodeInfo(baseDir, nodeName):
ledger = getLedger(baseDir, NodeInfoFile, storeHash=False,
isLineNoKey=False)
rec = ledger.get(nodeName)
ledger.close()
return json.loads(rec)
def storeNodeInfo(baseDir, nodeName, steward, nodeip, nodeport, clientip,
clientport):
data = {}
vnodeip, vnodeport, vclientip, vclientport = getAddGenesisHAs(nodeip,
nodeport,
clientip,
clientport)
nodeAddr = vnodeip + ":" + str(vnodeport)
clientAddr = vclientip + ":" + str(vclientport)
data['steward'] = steward
data['nodeAddr'] = nodeAddr
data['clientAddr'] = clientAddr
newJsonData = json.dumps(data)
ledger = getLedger(baseDir, NodeInfoFile, storeHash=False,
isLineNoKey=False)
storedJsonData = ledger.get(nodeName)
if not storedJsonData:
storeToFile(baseDir, NodeInfoFile, newJsonData, nodeName,
storeHash=False, isLineNoKey=False)
elif not storedJsonData == newJsonData:
newRec = []
for key, jsonValue in ledger.iterator(include_key=True,
include_value=True):
if key != nodeName:
newRec.append((key, jsonValue))
newRec.append((nodeName, newJsonData))
ledger.reset()
for key, value in newRec:
storeToFile(baseDir, NodeInfoFile, value, key, storeHash=False,
isLineNoKey=False)
ledger.close()
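# A stored node-info record produced above has this shape (illustrative
# addresses; the keys follow the ``data`` dict built in storeNodeInfo):
#
#   {"steward": "<steward verkey>", "nodeAddr": "10.0.0.1:9701",
#    "clientAddr": "10.0.0.1:9702"}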
def storeExportedTxns(baseDir, txn):
storeToFile(baseDir, ExportedTxnFile, txn, None, storeHash=False,
isLineNoKey=True)
def storeGenTxns(baseDir, txn):
storeToFile(baseDir, GenTxnFile, txn, None, storeHash=False,
isLineNoKey=True)
def getAddGenesisHAs(nodeip, nodeport, clientip, clientport):
vnodeip = nodeip if nodeip else "127.0.0.1"
vnodeport = nodeport if nodeport else "9701"
vclientip = clientip if clientip else vnodeip
vclientport = clientport if clientport else str(int(vnodeport) + 1)
return vnodeip, vnodeport, vclientip, vclientport
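# Defaulting behaviour of getAddGenesisHAs, shown as a sketch (the values
# follow directly from the code above):
#
#   >>> getAddGenesisHAs(None, None, None, None)
#   ('127.0.0.1', '9701', '127.0.0.1', '9702')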
def getAddNewGenNodeCommand(name, verkey, stewardkey, nodeip, nodeport,
clientip, clientport):
vnodeip, vnodeport, vclientip, vclientport = getAddGenesisHAs(nodeip,
nodeport,
clientip,
clientport)
nodeAddr = vnodeip + ":" + vnodeport
clientAddr = vclientip + ":" + vclientport
return 'add genesis transaction {node} with data {"'.format(node=PlenumTransactions.NODE.name) + name + '": {' \
'"verkey": ' + verkey + \
'"node_address": "' + nodeAddr + '", "client_address": "' + \
clientAddr + '"},' \
'"by": "' + stewardkey + '"}'
def getOldAddNewGenNodeCommand(name, verkey, stewardverkey, nodeip, nodeport,
clientip, clientport):
vnodeip, vnodeport, vclientip, vclientport = getAddGenesisHAs(nodeip,
nodeport,
clientip,
clientport)
return 'add genesis transaction {node} for '.format(node=PlenumTransactions.NODE.name) + verkey + ' by ' + \
stewardverkey + ' with data {"node_ip": "' + \
vnodeip + '", "node_port": ' + vnodeport + ', "client_ip": "' + \
vclientip + '", "client_port": ' + \
vclientport + ', "alias": "' + name + '"}'
def generateNodeGenesisTxn(baseDir, displayTxn, name, verkey, stewardverkey,
nodeip, nodeport, clientip, clientport):
storeNodeInfo(baseDir, name, stewardverkey, nodeip, nodeport, clientip,
clientport)
txn = getOldAddNewGenNodeCommand(name, verkey, stewardverkey, nodeip,
nodeport, clientip, clientport)
storeGenTxns(baseDir, txn)
printGenTxn(txn, displayTxn)
def getAddNewGenStewardCommand(name, verkey):
return 'add genesis transaction {nym} with data {"'.format(nym=PlenumTransactions.NYM.name) \
+ name + '": {"verkey": "' + verkey + \
'"} role={role}'.format(role=Roles.STEWARD.name)
def getOldAddNewGenStewardCommand(name, verkey):
return 'add genesis transaction {nym} for '.format(nym=PlenumTransactions.NYM.name) + verkey + ' with data ' \
'{"alias": ' \
'"' + name + \
'"} role={role}'.format(role=Roles.STEWARD.name)
def generateStewardGenesisTxn(baseDir, displayTxn, name, verkey):
txn = getOldAddNewGenStewardCommand(name, verkey)
storeGenTxns(baseDir, txn)
printGenTxn(txn, displayTxn)
def printGenTxn(txn, displayTxn):
if displayTxn:
print('\n' + txn)
def _checkClientConnected(cli, ):
assert cli.hasSufficientConnections
| apache-2.0 |
clayz/crazy-quiz-web | lib/werkzeug/exceptions.py | 316 | 17799 | # -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound, e:
return not_found(request)
except HTTPException, e:
return e
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
# Because of bootstrapping reasons we need to manually patch ourselves
# onto our parent module.
import werkzeug
werkzeug.exceptions = sys.modules[__name__]
from werkzeug._internal import _get_environ
from werkzeug._compat import iteritems, integer_types, text_type, \
implements_to_string
from werkzeug.wrappers import Response
@implements_to_string
class HTTPException(Exception):
"""
Baseclass for all HTTP exceptions. This exception can be called as WSGI
application to render a default error page or you can catch the subclasses
of it independently and render nicer error messages.
"""
code = None
description = None
def __init__(self, description=None, response=None):
Exception.__init__(self)
if description is not None:
self.description = description
self.response = response
@classmethod
def wrap(cls, exception, name=None):
"""This method returns a new subclass of the exception provided that
also is a subclass of `BadRequest`.
"""
class newcls(cls, exception):
def __init__(self, arg=None, *args, **kwargs):
cls.__init__(self, *args, **kwargs)
exception.__init__(self, arg)
newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
newcls.__name__ = name or cls.__name__ + exception.__name__
return newcls
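    # ``wrap`` is how this module derives combined exception types; e.g.
    # ``BadRequestKeyError = BadRequest.wrap(KeyError)`` near the end of
    # this file builds an exception that is both a KeyError and a 400.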
@property
def name(self):
"""The status name."""
return HTTP_STATUS_CODES.get(self.code, 'Unknown Error')
def get_description(self, environ=None):
"""Get the description."""
return u'<p>%s</p>' % escape(self.description)
def get_body(self, environ=None):
"""Get the HTML body."""
return text_type((
u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
u'<title>%(code)s %(name)s</title>\n'
u'<h1>%(name)s</h1>\n'
u'%(description)s\n'
) % {
'code': self.code,
'name': escape(self.name),
'description': self.get_description(environ)
})
def get_headers(self, environ=None):
"""Get a list of headers."""
return [('Content-Type', 'text/html')]
def get_response(self, environ=None):
"""Get a response object. If one was passed to the exception
it's returned directly.
:param environ: the optional environ for the request. This
can be used to modify the response depending
on how the request looked like.
:return: a :class:`Response` object or a subclass thereof.
"""
if self.response is not None:
return self.response
if environ is not None:
environ = _get_environ(environ)
headers = self.get_headers(environ)
return Response(self.get_body(environ), self.code, headers)
def __call__(self, environ, start_response):
"""Call the exception as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
"""
response = self.get_response(environ)
return response(environ, start_response)
def __str__(self):
return '%d: %s' % (self.code, self.name)
def __repr__(self):
return '<%s \'%s\'>' % (self.__class__.__name__, self)
class BadRequest(HTTPException):
"""*400* `Bad Request`
Raise if the browser sends something to the application the application
or server cannot handle.
"""
code = 400
description = (
'The browser (or proxy) sent a request that this server could '
'not understand.'
)
class ClientDisconnected(BadRequest):
"""Internal exception that is raised if Werkzeug detects a disconnected
client. Since the client is already gone at that point attempting to
send the error message to the client might not work and might ultimately
result in another exception in the server. Mainly this is here so that
it is silenced by default as far as Werkzeug is concerned.
Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent this might or might not be raised if a client
is gone.
.. versionadded:: 0.8
"""
class SecurityError(BadRequest):
"""Raised if something triggers a security error. This is otherwise
exactly like a bad request error.
.. versionadded:: 0.9
"""
class Unauthorized(HTTPException):
"""*401* `Unauthorized`
Raise if the user is not authorized. Also used if you want to use HTTP
basic auth.
"""
code = 401
description = (
'The server could not verify that you are authorized to access '
'the URL requested. You either supplied the wrong credentials (e.g. '
'a bad password), or your browser doesn\'t understand how to supply '
'the credentials required.'
)
class Forbidden(HTTPException):
"""*403* `Forbidden`
Raise if the user doesn't have the permission for the requested resource
but was authenticated.
"""
code = 403
description = (
'You don\'t have the permission to access the requested resource. '
'It is either read-protected or not readable by the server.'
)
class NotFound(HTTPException):
"""*404* `Not Found`
Raise if a resource does not exist and never existed.
"""
code = 404
description = (
'The requested URL was not found on the server. '
'If you entered the URL manually please check your spelling and '
'try again.'
)
class MethodNotAllowed(HTTPException):
"""*405* `Method Not Allowed`
Raise if the server used a method the resource does not handle. For
example `POST` if the resource is view only. Especially useful for REST.
The first argument for this exception should be a list of allowed methods.
Strictly speaking the response would be invalid if you don't provide valid
methods in the header which you can do with that list.
"""
code = 405
description = 'The method is not allowed for the requested URL.'
def __init__(self, valid_methods=None, description=None):
"""Takes an optional list of valid http methods
starting with werkzeug 0.3 the list will be mandatory."""
HTTPException.__init__(self, description)
self.valid_methods = valid_methods
def get_headers(self, environ):
headers = HTTPException.get_headers(self, environ)
if self.valid_methods:
headers.append(('Allow', ', '.join(self.valid_methods)))
return headers
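    # e.g. ``raise MethodNotAllowed(['GET', 'HEAD'])`` renders a 405 whose
    # response carries an ``Allow: GET, HEAD`` header via get_headers above.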
class NotAcceptable(HTTPException):
"""*406* `Not Acceptable`
Raise if the server can't return any content conforming to the
`Accept` headers of the client.
"""
code = 406
description = (
'The resource identified by the request is only capable of '
'generating response entities which have content characteristics '
'not acceptable according to the accept headers sent in the '
'request.'
)
class RequestTimeout(HTTPException):
"""*408* `Request Timeout`
Raise to signalize a timeout.
"""
code = 408
description = (
'The server closed the network connection because the browser '
'didn\'t finish the request within the specified time.'
)
class Conflict(HTTPException):
"""*409* `Conflict`
Raise to signal that a request cannot be completed because it conflicts
with the current state on the server.
.. versionadded:: 0.7
"""
code = 409
description = (
'A conflict happened while processing the request. The resource '
'might have been modified while the request was being processed.'
)
class Gone(HTTPException):
"""*410* `Gone`
Raise if a resource existed previously and went away without new location.
"""
code = 410
description = (
'The requested URL is no longer available on this server and '
'there is no forwarding address.</p><p>If you followed a link '
'from a foreign page, please contact the author of this page.'
)
class LengthRequired(HTTPException):
"""*411* `Length Required`
Raise if the browser submitted data but no ``Content-Length`` header which
is required for the kind of processing the server does.
"""
code = 411
description = (
'A request with this method requires a valid <code>Content-'
'Length</code> header.'
)
class PreconditionFailed(HTTPException):
"""*412* `Precondition Failed`
Status code used in combination with ``If-Match``, ``If-None-Match``, or
``If-Unmodified-Since``.
"""
code = 412
description = (
'The precondition on the request for the URL failed positive '
'evaluation.'
)
class RequestEntityTooLarge(HTTPException):
"""*413* `Request Entity Too Large`
The status code one should return if the data submitted exceeded a given
limit.
"""
code = 413
description = (
'The data value transmitted exceeds the capacity limit.'
)
class RequestURITooLarge(HTTPException):
"""*414* `Request URI Too Large`
Like *413* but for too long URLs.
"""
code = 414
description = (
'The length of the requested URL exceeds the capacity limit '
'for this server. The request cannot be processed.'
)
class UnsupportedMediaType(HTTPException):
"""*415* `Unsupported Media Type`
The status code returned if the server is unable to handle the media type
the client transmitted.
"""
code = 415
description = (
'The server does not support the media type transmitted in '
'the request.'
)
class RequestedRangeNotSatisfiable(HTTPException):
"""*416* `Requested Range Not Satisfiable`
The client asked for a part of the file that lies beyond the end
of the file.
.. versionadded:: 0.7
"""
code = 416
description = (
'The server cannot provide the requested range.'
)
class ExpectationFailed(HTTPException):
"""*417* `Expectation Failed`
The server cannot meet the requirements of the Expect request-header.
.. versionadded:: 0.7
"""
code = 417
description = (
'The server could not meet the requirements of the Expect header'
)
class ImATeapot(HTTPException):
"""*418* `I'm a teapot`
The server should return this if it is a teapot and someone attempted
to brew coffee with it.
.. versionadded:: 0.7
"""
code = 418
description = (
'This server is a teapot, not a coffee machine'
)
class UnprocessableEntity(HTTPException):
"""*422* `Unprocessable Entity`
Used if the request is well formed, but the instructions are otherwise
incorrect.
"""
code = 422
description = (
'The request was well-formed but was unable to be followed '
'due to semantic errors.'
)
class PreconditionRequired(HTTPException):
"""*428* `Precondition Required`
The server requires this request to be conditional, typically to prevent
the lost update problem, which is a race condition between two or more
clients attempting to update a resource through PUT or DELETE. By requiring
each client to include a conditional header ("If-Match" or "If-Unmodified-
Since") with the proper value retained from a recent GET request, the
server ensures that each client has at least seen the previous revision of
the resource.
"""
code = 428
description = (
'This request is required to be conditional; try using "If-Match" '
'or "If-Unmodified-Since".'
)
class TooManyRequests(HTTPException):
"""*429* `Too Many Requests`
The server is limiting the rate at which this user receives responses, and
this request exceeds that rate. (The server may use any convenient method
to identify users and their request rates). The server may include a
"Retry-After" header to indicate how long the user should wait before
retrying.
"""
code = 429
description = (
'This user has exceeded an allotted request count. Try again later.'
)
class RequestHeaderFieldsTooLarge(HTTPException):
"""*431* `Request Header Fields Too Large`
The server refuses to process the request because the header fields are too
large. One or more individual fields may be too large, or the set of all
headers is too large.
"""
code = 431
description = (
'One or more header fields exceeds the maximum size.'
)
class InternalServerError(HTTPException):
"""*500* `Internal Server Error`
Raise if an internal server error occurred. This is a good fallback if an
unknown error occurred in the dispatcher.
"""
code = 500
description = (
'The server encountered an internal error and was unable to '
'complete your request. Either the server is overloaded or there '
'is an error in the application.'
)
class NotImplemented(HTTPException):
"""*501* `Not Implemented`
Raise if the application does not support the action requested by the
browser.
"""
code = 501
description = (
'The server does not support the action requested by the '
'browser.'
)
class BadGateway(HTTPException):
"""*502* `Bad Gateway`
If you do proxying in your application you should return this status code
if you received an invalid response from the upstream server it accessed
in attempting to fulfill the request.
"""
code = 502
description = (
'The proxy server received an invalid response from an upstream '
'server.'
)
class ServiceUnavailable(HTTPException):
"""*503* `Service Unavailable`
Status code you should return if a service is temporarily unavailable.
"""
code = 503
description = (
'The server is temporarily unable to service your request due to '
'maintenance downtime or capacity problems. Please try again '
'later.'
)
default_exceptions = {}
__all__ = ['HTTPException']
def _find_exceptions():
for name, obj in iteritems(globals()):
try:
if getattr(obj, 'code', None) is not None:
default_exceptions[obj.code] = obj
__all__.append(obj.__name__)
except TypeError: # pragma: no cover
continue
_find_exceptions()
del _find_exceptions
class Aborter(object):
"""
When passed a dict of code -> exception items it can be used as
callable that raises exceptions. If the first argument to the
callable is an integer it will be looked up in the mapping, if it's
a WSGI application it will be raised in a proxy exception.
The rest of the arguments are forwarded to the exception constructor.
"""
def __init__(self, mapping=None, extra=None):
if mapping is None:
mapping = default_exceptions
self.mapping = dict(mapping)
if extra is not None:
self.mapping.update(extra)
def __call__(self, code, *args, **kwargs):
if not args and not kwargs and not isinstance(code, integer_types):
raise HTTPException(response=code)
if code not in self.mapping:
raise LookupError('no exception for %r' % code)
raise self.mapping[code](*args, **kwargs)
abort = Aborter()
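# Illustrative use of ``abort`` (a sketch; both forms follow from
# Aborter.__call__ above):
#
#   abort(404)                          # raises NotFound
#   abort(Response('You are banned'))   # raises HTTPException(response=...)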
#: an exception that is used internally to signal both a key error and a
#: bad request. Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)
# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
from werkzeug.http import HTTP_STATUS_CODES
| apache-2.0 |
bhavin04890/finaldashboard | static/scripts/tools/csv2xml.py | 12 | 3028 | # -*- coding: utf-8 -*-
#
# Debug/Helper script for CSV stylesheet development
#
# >>> python csv2xml <CSV File>
# ... converts the CSV file into XML
#
# >>> python csv2xml <CSV File> <XSLT Stylesheet>
# ... converts the CSV file into XML and transforms it using the stylesheet
#
import sys
import csv
from lxml import etree
from xml.sax.saxutils import escape, unescape
TABLE = "table"
ROW = "row"
COL = "col"
FIELD = "field"
def xml_encode(s):
if s:
s = escape(s, {"'": "'", '"': """})
return s
def xml_decode(s):
if s:
s = unescape(s, {"'": "'", """: '"'})
return s
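# Round-trip sketch for the two helpers above (Python 2 strings, matching
# the rest of this script):
#
#   >>> xml_encode('say "hi"')
#   'say &quot;hi&quot;'
#   >>> xml_decode(xml_encode('say "hi"'))
#   'say "hi"'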
def parse(source):
parser = etree.XMLParser(no_network=False)
result = etree.parse(source, parser)
return result
def csv2tree(source, delimiter=",", quotechar='"'):
root = etree.Element(TABLE)
    def utf_8_encode(source):
        # Decode each line using the last encoding that worked (utf-8
        # first); on failure, probe the candidates and cache the winner.
        encodings = ["utf-8", "iso-8859-1"]
e = encodings[0]
for line in source:
if e:
try:
yield unicode(line, e, "strict").encode("utf-8")
except:
pass
else:
continue
for encoding in encodings:
try:
yield unicode(line, encoding, "strict").encode("utf-8")
except:
continue
else:
e = encoding
break
reader = csv.DictReader(utf_8_encode(source),
delimiter=delimiter,
quotechar=quotechar)
for r in reader:
row = etree.SubElement(root, ROW)
for k in r:
col = etree.SubElement(row, COL)
col.set(FIELD, str(k))
value = r[k]
if value:
text = str(value)
if text.lower() not in ("null", "<null>"):
text = xml_encode(unicode(text.decode("utf-8")))
col.text = text
else:
col.text = ""
return etree.ElementTree(root)
def transform(tree, stylesheet_path, **args):
if args:
_args = [(k, "'%s'" % args[k]) for k in args]
_args = dict(_args)
else:
_args = None
stylesheet = etree.parse(stylesheet_path)
ac = etree.XSLTAccessControl(read_file=True, read_network=True)
transformer = etree.XSLT(stylesheet, access_control=ac)
if _args:
result = transformer(tree, **_args)
else:
result = transformer(tree)
return result
def main(argv):
try:
csvpath = argv[0]
except:
print "Usage: python csv2xml <CSV File> [<XSLT Stylesheet>]"
return
try:
xslpath = argv[1]
except:
xslpath = None
csvfile = open(csvpath)
tree = csv2tree(csvfile)
if xslpath is not None:
tree = transform(tree, xslpath)
print etree.tostring(tree, pretty_print=True)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| mit |
erjac77/ansible-module-f5bigip | library/f5bigip_ltm_profile_diameter.py | 2 | 8266 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_profile_diameter
short_description: BIG-IP ltm profile diameter module
description:
- Configures a profile to manage Diameter network traffic.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
app_service:
description:
- Specifies the name of the application service to which the profile belongs.
connection_prime:
description:
- When enabled, and the system receives a capabilities exchange request from the client, the system will
establish connections and perform handshaking with all the servers prior to sending the capabilities
exchange answer to the client.
default: disabled
choices: ['disabled', 'enabled']
defaults_from:
description:
- Specifies the profile that you want to use as the parent profile.
default: diameter
description:
description:
- User defined description.
destination_realm:
description:
- This attribute has been deprecated as of BIG-IP v11.
handshake_timeout:
description:
- Specifies the handshake timeout in seconds.
default: 10
choices: range(0,4294967296)
host_ip_rewrite:
description:
- When enabled and the message is a capabilities exchange request or capabilities exchange answer, rewrite
the host-ip-address attribute with the system's egress IP address.
default: enabled
choices: ['disabled', 'enabled']
max_retransmit_attempts:
description:
- Specifies the maximum number of retransmit attempts.
default: 1
choices: range(0,4294967296)
max_watchdog_failure:
description:
- Specifies the maximum number of device watchdog failures that the traffic management system can take
before it tears down the connection.
default: 10
choices: range(0,4294967296)
name:
description:
- Specifies a unique name for the component.
required: true
origin_host_to_client:
description:
- Specifies the origin host to client of BIG-IP.
origin_host_to_server:
description:
- Specifies the origin host to server of BIG-IP.
origin_realm_to_client:
description:
- Specifies the origin realm of BIG-IP.
origin_realm_to_server:
description:
- Specifies the origin realm to server of BIG-IP.
overwrite_destination_host:
description:
- This attribute has been deprecated as of BIG-IP v11.
default: enabled
choices: ['disabled', 'enabled']
parent_avp:
description:
- Specifies the name of the Diameter attribute that the system uses to indicate if the persist-avp option is
embedded in a grouped avp.
choices: range(0, 4294967296)
partition:
description:
- Displays the administrative partition within which the profile resides.
persist_avp:
description:
- Specifies the name of the Diameter attribute that the system persists on.
reset_on_timeout:
description:
- When it is enabled and the watchdog failures exceed the max watchdog failure, the system resets the
connection.
default: enabled
choices: ['disabled', 'enabled']
retransmit_timeout:
description:
- Specifies the retransmit timeout in seconds.
default: 10
choices: range(0, 4294967296)
subscriber_aware:
description:
- When you enable this option, the system extracts available subscriber information, such as phone number or
phone model, from diameter authentication and/or accounting packets.
default: disabled
choices: ['disabled', 'enabled']
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
watchdog_timeout:
description:
- Specifies the watchdog timeout in seconds.
default: 0
choices: range(0, 4294967296)
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM Profile Diameter
f5bigip_ltm_profile_diameter:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_diameter_profile
partition: Common
description: My diameter profile
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import range
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
app_service=dict(type='str'),
connection_prime=dict(type='str', choices=F5_ACTIVATION_CHOICES),
defaults_from=dict(type='str'),
description=dict(type='str'),
destination_realm=dict(type='str'),
handshake_timeout=dict(type='int', choices=range(0, 4294967296)),
host_ip_rewrite=dict(type='str', choices=F5_ACTIVATION_CHOICES),
max_retransmit_attempts=dict(type='int', choices=range(0, 4294967296)),
max_watchdog_failure=dict(type='int', choices=range(0, 4294967296)),
origin_host_to_client=dict(type='str'),
origin_host_to_server=dict(type='str'),
origin_realm_to_client=dict(type='str'),
origin_realm_to_server=dict(type='str'),
overwrite_destination_host=dict(type='str', choices=F5_ACTIVATION_CHOICES),
parent_avp=dict(type='str'),
persist_avp=dict(type='str'),
reset_on_timeout=dict(type='str', choices=F5_ACTIVATION_CHOICES),
retransmit_timeout=dict(type='int', choices=range(0, 4294967296)),
subscriber_aware=dict(type='str', choices=F5_ACTIVATION_CHOICES),
watchdog_timeout=dict(type='int', choices=range(0, 4294967296))
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpLtmProfileDiameter(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'create': self._api.tm.ltm.profile.diameters.diameter.create,
'read': self._api.tm.ltm.profile.diameters.diameter.load,
'update': self._api.tm.ltm.profile.diameters.diameter.update,
'delete': self._api.tm.ltm.profile.diameters.diameter.delete,
'exists': self._api.tm.ltm.profile.diameters.diameter.exists
}
def main():
params = ModuleParams()
module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)
try:
obj = F5BigIpLtmProfileDiameter(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
| apache-2.0 |
mattrobenolt/django | django/contrib/staticfiles/handlers.py | 581 | 2328 | from django.conf import settings
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six.moves.urllib.request import url2pathname
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATIC_URL setting, and serves those files.
"""
# May be used to differentiate between handler types (e.g. in a
# request_finished signal)
handles_files = True
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super(StaticFilesHandler, self).__init__()
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404 as e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
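# Minimal wiring sketch (a sketch; assumes a configured Django project with
# STATIC_URL set):
#
#   from django.core.wsgi import get_wsgi_application
#   application = StaticFilesHandler(get_wsgi_application())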
| bsd-3-clause |
aaxelb/osf.io | framework/postcommit_tasks/handlers.py | 21 | 3764 | # -*- coding: utf-8 -*-
import functools
import hashlib
import logging
import threading
import binascii
from collections import OrderedDict
import os
from celery import chain
from framework.celery_tasks import app
from celery.local import PromiseProxy
from gevent.pool import Pool
from website import settings
_local = threading.local()
logger = logging.getLogger(__name__)
def postcommit_queue():
if not hasattr(_local, 'postcommit_queue'):
_local.postcommit_queue = OrderedDict()
return _local.postcommit_queue
def postcommit_celery_queue():
if not hasattr(_local, 'postcommit_celery_queue'):
_local.postcommit_celery_queue = OrderedDict()
return _local.postcommit_celery_queue
def postcommit_before_request():
_local.postcommit_queue = OrderedDict()
_local.postcommit_celery_queue = OrderedDict()
@app.task(max_retries=5, default_retry_delay=60)
def postcommit_celery_task_wrapper(queue):
# chain.apply calls the tasks synchronously without re-enqueuing each one
# http://stackoverflow.com/questions/34177131/how-to-solve-python-celery-error-when-using-chain-encodeerrorruntimeerrormaxi?answertab=votes#tab-top
chain(*queue.values()).apply()
def postcommit_after_request(response, base_status_error_code=500):
if response.status_code >= base_status_error_code:
_local.postcommit_queue = OrderedDict()
_local.postcommit_celery_queue = OrderedDict()
return response
try:
if postcommit_queue():
number_of_threads = 30 # one db connection per greenlet, let's share
pool = Pool(number_of_threads)
for func in postcommit_queue().values():
pool.spawn(func)
pool.join(timeout=5.0, raise_error=True) # 5 second timeout and reraise exceptions
if postcommit_celery_queue():
if settings.USE_CELERY:
# delay pushes the wrapper task into celery
postcommit_celery_task_wrapper.delay(postcommit_celery_queue())
else:
for task in postcommit_celery_queue().values():
task()
except AttributeError as ex:
if not settings.DEBUG_MODE:
logger.error('Post commit task queue not initialized: {}'.format(ex))
return response
def enqueue_postcommit_task(fn, args, kwargs, celery=False, once_per_request=True):
# make a hash of the pertinent data
raw = [fn.__name__, fn.__module__, args, kwargs]
m = hashlib.md5()
m.update('-'.join([x.__repr__() for x in raw]))
key = m.hexdigest()
if not once_per_request:
# we want to run it once for every occurrence, add a random string
key = '{}:{}'.format(key, binascii.hexlify(os.urandom(8)))
if celery and isinstance(fn, PromiseProxy):
postcommit_celery_queue().update({key: fn.si(*args, **kwargs)})
else:
postcommit_queue().update({key: functools.partial(fn, *args, **kwargs)})
handlers = {
'before_request': postcommit_before_request,
'after_request': postcommit_after_request,
}
def run_postcommit(once_per_request=True, celery=False):
'''
Delays function execution until after the request's transaction has been committed.
If you set the celery kwarg to True args and kwargs must be JSON serializable
Tasks will only be run if the response's status code is < 500.
:return:
'''
def wrapper(func):
# if we're local dev or running unit tests, run without queueing
if settings.DEBUG_MODE:
return func
@functools.wraps(func)
def wrapped(*args, **kwargs):
enqueue_postcommit_task(func, args, kwargs, celery=celery, once_per_request=once_per_request)
return wrapped
return wrapper
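# Illustrative use of the decorator above (a sketch; the function body is
# hypothetical):
#
#   @run_postcommit(once_per_request=True, celery=False)
#   def send_digest(user_id):
#       ...  # deferred until the request's transaction has committed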
| apache-2.0 |
ForkedReposBak/mxnet | python/mxnet/gluon/contrib/nn/basic_layers.py | 2 | 17216 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""Custom neural network layers in model_zoo."""
__all__ = ['Concurrent', 'HybridConcurrent', 'Identity', 'SparseEmbedding',
'SyncBatchNorm', 'PixelShuffle1D', 'PixelShuffle2D',
'PixelShuffle3D']
import warnings
from .... import ndarray as nd, context
from ...block import HybridBlock, Block
from ...nn import Sequential, HybridSequential, BatchNorm
class Concurrent(Sequential):
"""Lays `Block` s concurrently.
This block feeds its input to all children blocks, and
produce the output by concatenating all the children blocks' outputs
on the specified axis.
Example::
net = Concurrent()
# use net's name_scope to give children blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
net.add(Identity())
Parameters
----------
axis : int, default -1
The axis on which to concatenate the outputs.
"""
def __init__(self, axis=-1, prefix=None, params=None):
super(Concurrent, self).__init__(prefix=prefix, params=params)
self.axis = axis
def forward(self, x):
out = []
for block in self._children.values():
out.append(block()(x))
out = nd.concat(*out, dim=self.axis)
return out
class HybridConcurrent(HybridSequential):
"""Lays `HybridBlock` s concurrently.
This block feeds its input to all children blocks, and
produce the output by concatenating all the children blocks' outputs
on the specified axis.
Example::
net = HybridConcurrent()
# use net's name_scope to give children blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
net.add(Identity())
Parameters
----------
axis : int, default -1
The axis on which to concatenate the outputs.
"""
def __init__(self, axis=-1, prefix=None, params=None):
super(HybridConcurrent, self).__init__(prefix=prefix, params=params)
self.axis = axis
def hybrid_forward(self, F, x):
out = []
for block in self._children.values():
out.append(block()(x))
out = F.concat(*out, dim=self.axis)
return out
class Identity(HybridBlock):
"""Block that passes through the input directly.
This block can be used in conjunction with HybridConcurrent
block for residual connection.
Example::
net = HybridConcurrent()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
net.add(Identity())
"""
def __init__(self, prefix=None, params=None):
super(Identity, self).__init__(prefix=prefix, params=params)
def hybrid_forward(self, F, x):
return x
class SparseEmbedding(Block):
r"""Turns non-negative integers (indexes/tokens) into dense vectors
of fixed size. eg. [4, 20] -> [[0.25, 0.1], [0.6, -0.2]]
This SparseBlock is designed for distributed training with extremely large
input dimension. Both weight and gradient w.r.t. weight are `RowSparseNDArray`.
Note: if `sparse_grad` is set to True, the gradient w.r.t weight will be
sparse. Only a subset of optimizers support sparse gradients, including SGD, AdaGrad
    and Adam. By default lazy updates are turned on, which may perform differently
from standard updates. For more details, please check the Optimization API at:
https://mxnet.incubator.apache.org/api/python/optimization/optimization.html
Parameters
----------
input_dim : int
Size of the vocabulary, i.e. maximum integer index + 1.
output_dim : int
Dimension of the dense embedding.
dtype : str or np.dtype, default 'float32'
Data type of output embeddings.
weight_initializer : Initializer
Initializer for the `embeddings` matrix.
Inputs:
- **data**: (N-1)-D tensor with shape: `(x1, x2, ..., xN-1)`.
Output:
- **out**: N-D tensor with shape: `(x1, x2, ..., xN-1, output_dim)`.
"""
def __init__(self, input_dim, output_dim, dtype='float32',
weight_initializer=None, **kwargs):
super(SparseEmbedding, self).__init__(**kwargs)
self._kwargs = {'input_dim': input_dim, 'output_dim': output_dim,
'dtype': dtype, 'sparse_grad': True}
self.weight = self.params.get('weight', shape=(input_dim, output_dim),
init=weight_initializer, dtype=dtype,
grad_stype='row_sparse', stype='row_sparse')
def forward(self, x):
weight = self.weight.row_sparse_data(x)
return nd.Embedding(x, weight, name='fwd', **self._kwargs)
def __repr__(self):
s = '{block_name}({input_dim} -> {output_dim}, {dtype})'
return s.format(block_name=self.__class__.__name__,
**self._kwargs)
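# Illustrative construction of SparseEmbedding (a sketch; assumes
# ``import mxnet as mx`` and follows the shapes in the docstring):
#
#   >>> embed = SparseEmbedding(input_dim=10000, output_dim=32)
#   >>> embed.initialize()
#   >>> embed(mx.nd.array([4, 20])).shape
#   (2, 32)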
class SyncBatchNorm(BatchNorm):
"""Cross-GPU Synchronized Batch normalization (SyncBN)
Standard BN [1]_ implementation only normalize the data within each device.
SyncBN normalizes the input within the whole mini-batch.
We follow the implementation described in the paper [2]_.
Note: Current implementation of SyncBN does not support FP16 training.
For FP16 inference, use standard nn.BatchNorm instead of SyncBN.
Parameters
----------
in_channels : int, default 0
Number of channels (feature maps) in input data. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
num_devices : int, default number of visible GPUs
momentum: float, default 0.9
Momentum for the moving average.
epsilon: float, default 1e-5
Small float added to variance to avoid dividing by zero.
center: bool, default True
If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: bool, default True
If True, multiply by `gamma`. If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
use_global_stats: bool, default False
If True, use global moving statistics instead of local batch-norm. This will force
change batch-norm into a scale shift operator.
If False, use local batch-norm.
beta_initializer: str or `Initializer`, default 'zeros'
Initializer for the beta weight.
gamma_initializer: str or `Initializer`, default 'ones'
Initializer for the gamma weight.
running_mean_initializer: str or `Initializer`, default 'zeros'
Initializer for the running mean.
running_variance_initializer: str or `Initializer`, default 'ones'
Initializer for the running variance.
Inputs:
- **data**: input tensor with arbitrary shape.
Outputs:
- **out**: output tensor with the same shape as `data`.
Reference:
.. [1] Ioffe, Sergey, and Christian Szegedy. "Batch normalization: Accelerating \
deep network training by reducing internal covariate shift." *ICML 2015*
.. [2] Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, \
Ambrish Tyagi, and Amit Agrawal. "Context Encoding for Semantic Segmentation." *CVPR 2018*
"""
def __init__(self, in_channels=0, num_devices=None, momentum=0.9, epsilon=1e-5,
center=True, scale=True, use_global_stats=False, beta_initializer='zeros',
gamma_initializer='ones', running_mean_initializer='zeros',
running_variance_initializer='ones', **kwargs):
super(SyncBatchNorm, self).__init__(
axis=1, momentum=momentum, epsilon=epsilon,
center=center, scale=scale,
use_global_stats=use_global_stats,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
running_mean_initializer=running_mean_initializer,
running_variance_initializer=running_variance_initializer,
in_channels=in_channels, **kwargs)
num_devices = self._get_num_devices() if num_devices is None else num_devices
self._kwargs = {'eps': epsilon, 'momentum': momentum,
'fix_gamma': not scale, 'use_global_stats': use_global_stats,
'ndev': num_devices, 'key': self.prefix}
def _get_num_devices(self):
warnings.warn("Caution using SyncBatchNorm: "
"if not using all the GPUs, please mannually set num_devices",
UserWarning)
num_devices = context.num_gpus()
num_devices = num_devices if num_devices > 0 else 1
return num_devices
def hybrid_forward(self, F, x, gamma, beta, running_mean, running_var):
return F.contrib.SyncBatchNorm(x, gamma, beta, running_mean, running_var,
name='fwd', **self._kwargs)
class PixelShuffle1D(HybridBlock):
r"""Pixel-shuffle layer for upsampling in 1 dimension.
Pixel-shuffling is the operation of taking groups of values along
the *channel* dimension and regrouping them into blocks of pixels
along the ``W`` dimension, thereby effectively multiplying that dimension
by a constant factor in size.
For example, a feature map of shape :math:`(fC, W)` is reshaped
into :math:`(C, fW)` by forming little value groups of size :math:`f`
and arranging them in a grid of size :math:`W`.
Parameters
----------
factor : int or 1-tuple of int
Upsampling factor, applied to the ``W`` dimension.
Inputs:
- **data**: Tensor of shape ``(N, f*C, W)``.
Outputs:
- **out**: Tensor of shape ``(N, C, W*f)``.
Examples
--------
>>> pxshuf = PixelShuffle1D(2)
>>> x = mx.nd.zeros((1, 8, 3))
>>> pxshuf(x).shape
(1, 4, 6)
"""
def __init__(self, factor):
super(PixelShuffle1D, self).__init__()
self._factor = int(factor)
def hybrid_forward(self, F, x):
"""Perform pixel-shuffling on the input."""
f = self._factor
# (N, C*f, W)
x = F.reshape(x, (0, -4, -1, f, 0)) # (N, C, f, W)
x = F.transpose(x, (0, 1, 3, 2)) # (N, C, W, f)
x = F.reshape(x, (0, 0, -3)) # (N, C, W*f)
return x
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self._factor)
class PixelShuffle2D(HybridBlock):
r"""Pixel-shuffle layer for upsampling in 2 dimensions.
Pixel-shuffling is the operation of taking groups of values along
the *channel* dimension and regrouping them into blocks of pixels
along the ``H`` and ``W`` dimensions, thereby effectively multiplying
those dimensions by a constant factor in size.
For example, a feature map of shape :math:`(f^2 C, H, W)` is reshaped
into :math:`(C, fH, fW)` by forming little :math:`f \times f` blocks
of pixels and arranging them in an :math:`H \times W` grid.
Pixel-shuffling together with regular convolution is an alternative,
learnable way of upsampling an image by arbitrary factors. It is reported
to help overcome checkerboard artifacts that are common in upsampling with
transposed convolutions (also called deconvolutions). See the paper
`Real-Time Single Image and Video Super-Resolution Using an Efficient
Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_
for further details.
Parameters
----------
factor : int or 2-tuple of int
Upsampling factors, applied to the ``H`` and ``W`` dimensions,
in that order.
Inputs:
- **data**: Tensor of shape ``(N, f1*f2*C, H, W)``.
Outputs:
- **out**: Tensor of shape ``(N, C, H*f1, W*f2)``.
Examples
--------
>>> pxshuf = PixelShuffle2D((2, 3))
>>> x = mx.nd.zeros((1, 12, 3, 5))
>>> pxshuf(x).shape
(1, 2, 6, 15)
"""
def __init__(self, factor):
super(PixelShuffle2D, self).__init__()
try:
self._factors = (int(factor),) * 2
except TypeError:
self._factors = tuple(int(fac) for fac in factor)
assert len(self._factors) == 2, "wrong length {}".format(len(self._factors))
def hybrid_forward(self, F, x):
"""Perform pixel-shuffling on the input."""
f1, f2 = self._factors
# (N, f1*f2*C, H, W)
x = F.reshape(x, (0, -4, -1, f1 * f2, 0, 0)) # (N, C, f1*f2, H, W)
x = F.reshape(x, (0, 0, -4, f1, f2, 0, 0)) # (N, C, f1, f2, H, W)
x = F.transpose(x, (0, 1, 4, 2, 5, 3)) # (N, C, H, f1, W, f2)
x = F.reshape(x, (0, 0, -3, -3)) # (N, C, H*f1, W*f2)
return x
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self._factors)
class PixelShuffle3D(HybridBlock):
r"""Pixel-shuffle layer for upsampling in 3 dimensions.
Pixel-shuffling (or voxel-shuffling in 3D) is the operation of taking
groups of values along the *channel* dimension and regrouping them into
blocks of voxels along the ``D``, ``H`` and ``W`` dimensions, thereby
effectively multiplying those dimensions by a constant factor in size.
For example, a feature map of shape :math:`(f^3 C, D, H, W)` is reshaped
into :math:`(C, fD, fH, fW)` by forming little :math:`f \times f \times f`
blocks of voxels and arranging them in a :math:`D \times H \times W` grid.
Pixel-shuffling together with regular convolution is an alternative,
learnable way of upsampling an image by arbitrary factors. It is reported
to help overcome checkerboard artifacts that are common in upsampling with
transposed convolutions (also called deconvolutions). See the paper
`Real-Time Single Image and Video Super-Resolution Using an Efficient
Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_
for further details.
Parameters
----------
factor : int or 3-tuple of int
Upsampling factors, applied to the ``D``, ``H`` and ``W``
dimensions, in that order.
Inputs:
- **data**: Tensor of shape ``(N, f1*f2*f3*C, D, H, W)``.
Outputs:
- **out**: Tensor of shape ``(N, C, D*f1, H*f2, W*f3)``.
Examples
--------
>>> pxshuf = PixelShuffle3D((2, 3, 4))
>>> x = mx.nd.zeros((1, 48, 3, 5, 7))
>>> pxshuf(x).shape
(1, 2, 6, 15, 28)
"""
def __init__(self, factor):
super(PixelShuffle3D, self).__init__()
try:
self._factors = (int(factor),) * 3
except TypeError:
self._factors = tuple(int(fac) for fac in factor)
assert len(self._factors) == 3, "wrong length {}".format(len(self._factors))
def hybrid_forward(self, F, x):
"""Perform pixel-shuffling on the input."""
# `transpose` doesn't support 8D, need other implementation
f1, f2, f3 = self._factors
# (N, C*f1*f2*f3, D, H, W)
x = F.reshape(x, (0, -4, -1, f1 * f2 * f3, 0, 0, 0)) # (N, C, f1*f2*f3, D, H, W)
x = F.swapaxes(x, 2, 3) # (N, C, D, f1*f2*f3, H, W)
x = F.reshape(x, (0, 0, 0, -4, f1, f2*f3, 0, 0)) # (N, C, D, f1, f2*f3, H, W)
x = F.reshape(x, (0, 0, -3, 0, 0, 0)) # (N, C, D*f1, f2*f3, H, W)
x = F.swapaxes(x, 3, 4) # (N, C, D*f1, H, f2*f3, W)
x = F.reshape(x, (0, 0, 0, 0, -4, f2, f3, 0)) # (N, C, D*f1, H, f2, f3, W)
x = F.reshape(x, (0, 0, 0, -3, 0, 0)) # (N, C, D*f1, H*f2, f3, W)
x = F.swapaxes(x, 4, 5) # (N, C, D*f1, H*f2, W, f3)
x = F.reshape(x, (0, 0, 0, 0, -3)) # (N, C, D*f1, H*f2, W*f3)
return x
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self._factors)
| apache-2.0 |
jagg81/translate-toolkit | build/lib.linux-x86_64-2.6/translate/convert/prop2po.py | 3 | 9977 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""convert Java/Mozilla .properties files to Gettext PO localization files
See: http://translate.sourceforge.net/wiki/toolkit/prop2po for examples and
usage instructions
"""
import sys
from translate.storage import po
from translate.storage import properties
class prop2po:
"""convert a .properties file to a .po file for handling the
translation."""
def convertstore(self, thepropfile, personality="java",
duplicatestyle="msgctxt"):
"""converts a .properties file to a .po file..."""
self.personality = personality
thetargetfile = po.pofile()
if self.personality == "mozilla" or self.personality == "skype":
targetheader = thetargetfile.init_headers(charset="UTF-8",
encoding="8bit",
x_accelerator_marker="&")
else:
targetheader = thetargetfile.init_headers(charset="UTF-8",
encoding="8bit")
targetheader.addnote("extracted from %s" % thepropfile.filename,
"developer")
# we try and merge the header po with any comments at the start of the
# properties file
appendedheader = False
waitingcomments = []
for propunit in thepropfile.units:
pounit = self.convertunit(propunit, "developer")
if pounit is None:
waitingcomments.extend(propunit.comments)
# FIXME the storage class should not be creating blank units
if pounit is "discard":
continue
if not appendedheader:
if propunit.isblank():
targetheader.addnote("\n".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
pounit = None
appendedheader = True
if pounit is not None:
pounit.addnote("\n".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
thetargetfile.addunit(pounit)
thetargetfile.removeduplicates(duplicatestyle)
return thetargetfile
def mergestore(self, origpropfile, translatedpropfile, personality="java",
blankmsgstr=False, duplicatestyle="msgctxt"):
"""converts two .properties files to a .po file..."""
self.personality = personality
thetargetfile = po.pofile()
if self.personality == "mozilla" or self.personality == "skype":
targetheader = thetargetfile.init_headers(charset="UTF-8",
encoding="8bit",
x_accelerator_marker="&")
else:
targetheader = thetargetfile.init_headers(charset="UTF-8",
encoding="8bit")
targetheader.addnote("extracted from %s, %s" % (origpropfile.filename, translatedpropfile.filename),
"developer")
translatedpropfile.makeindex()
# we try and merge the header po with any comments at the start of
# the properties file
appendedheader = False
waitingcomments = []
# loop through the original file, looking at units one by one
for origprop in origpropfile.units:
origpo = self.convertunit(origprop, "developer")
if origpo is None:
waitingcomments.extend(origprop.comments)
# FIXME the storage class should not be creating blank units
if origpo is "discard":
continue
# handle the header case specially...
if not appendedheader:
if origprop.isblank():
targetheader.addnote(u"".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
origpo = None
appendedheader = True
# try and find a translation of the same name...
if origprop.name in translatedpropfile.locationindex:
translatedprop = translatedpropfile.locationindex[origprop.name]
# Need to check that this comment is not a copy of the
# developer comments
translatedpo = self.convertunit(translatedprop, "translator")
if translatedpo is "discard":
continue
else:
translatedpo = None
# if we have a valid po unit, get the translation and add it...
if origpo is not None:
if translatedpo is not None and not blankmsgstr:
origpo.target = translatedpo.source
origpo.addnote(u"".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
thetargetfile.addunit(origpo)
elif translatedpo is not None:
print >> sys.stderr, "error converting original properties definition %s" % origprop.name
thetargetfile.removeduplicates(duplicatestyle)
return thetargetfile
def convertunit(self, propunit, commenttype):
"""Converts a .properties unit to a .po unit. Returns None if empty
or not for translation."""
if propunit is None:
return None
# escape unicode
pounit = po.pounit(encoding="UTF-8")
if hasattr(propunit, "comments"):
for comment in propunit.comments:
if "DONT_TRANSLATE" in comment:
return "discard"
pounit.addnote(u"".join(propunit.getnotes()).rstrip(), commenttype)
# TODO: handle multiline msgid
if propunit.isblank():
return None
pounit.addlocation(propunit.name)
pounit.source = propunit.source
pounit.target = u""
return pounit
def convertstrings(inputfile, outputfile, templatefile, personality="strings",
pot=False, duplicatestyle="msgctxt", encoding=None):
""".strings specific convertor function"""
return convertprop(inputfile, outputfile, templatefile,
personality="strings", pot=pot,
duplicatestyle=duplicatestyle, encoding=encoding)
def convertmozillaprop(inputfile, outputfile, templatefile, pot=False,
duplicatestyle="msgctxt"):
"""Mozilla specific convertor function"""
return convertprop(inputfile, outputfile, templatefile,
personality="mozilla", pot=pot,
duplicatestyle=duplicatestyle)
def convertprop(inputfile, outputfile, templatefile, personality="java",
pot=False, duplicatestyle="msgctxt", encoding=None):
"""reads in inputfile using properties, converts using prop2po, writes
to outputfile"""
inputstore = properties.propfile(inputfile, personality, encoding)
convertor = prop2po()
if templatefile is None:
outputstore = convertor.convertstore(inputstore, personality,
duplicatestyle=duplicatestyle)
else:
templatestore = properties.propfile(templatefile, personality, encoding)
outputstore = convertor.mergestore(templatestore, inputstore,
personality, blankmsgstr=pot,
duplicatestyle=duplicatestyle)
if outputstore.isempty():
return 0
outputfile.write(str(outputstore))
return 1
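# Sketch of driving the convertor directly (file names are hypothetical):
#
#   with open("messages.properties") as inp, open("messages.po", "w") as out:
#       convertprop(inp, out, None, personality="java")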
formats = {
"properties": ("po", convertprop),
("properties", "properties"): ("po", convertprop),
"lang": ("po", convertprop),
("lang", "lang"): ("po", convertprop),
"strings": ("po", convertstrings),
("strings", "strings"): ("po", convertstrings),
}
def main(argv=None):
from translate.convert import convert
parser = convert.ConvertOptionParser(formats, usetemplates=True,
usepots=True,
description=__doc__)
parser.add_option("", "--personality", dest="personality",
default=properties.default_dialect,
type="choice",
choices=properties.dialects.keys(),
help="override the input file format: %s (for .properties files, default: %s)" %
(", ".join(properties.dialects.iterkeys()),
properties.default_dialect),
metavar="TYPE")
parser.add_option("", "--encoding", dest="encoding", default=None,
help="override the encoding set by the personality",
metavar="ENCODING")
parser.add_duplicates_option()
parser.passthrough.append("pot")
parser.passthrough.append("personality")
parser.passthrough.append("encoding")
parser.run(argv)
if __name__ == '__main__':
main()
| gpl-2.0 |
boumenot/azure-linux-extensions | OSPatching/azure/__init__.py | 46 | 33598 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import ast
import base64
import hashlib
import hmac
import sys
import types
import warnings
import inspect
if sys.version_info < (3,):
from urllib2 import quote as url_quote
from urllib2 import unquote as url_unquote
_strtype = basestring
else:
from urllib.parse import quote as url_quote
from urllib.parse import unquote as url_unquote
_strtype = str
from datetime import datetime
from xml.dom import minidom
from xml.sax.saxutils import escape as xml_escape
#--------------------------------------------------------------------------
# constants
__author__ = 'Microsoft Corp. <[email protected]>'
__version__ = '0.8.4'
# Live ServiceClient URLs
BLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'
QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
TABLE_SERVICE_HOST_BASE = '.table.core.windows.net'
SERVICE_BUS_HOST_BASE = '.servicebus.windows.net'
MANAGEMENT_HOST = 'management.core.windows.net'
# Development ServiceClient URLs
DEV_BLOB_HOST = '127.0.0.1:10000'
DEV_QUEUE_HOST = '127.0.0.1:10001'
DEV_TABLE_HOST = '127.0.0.1:10002'
# Default credentials for Development Storage Service
DEV_ACCOUNT_NAME = 'devstoreaccount1'
DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
# All of our error messages
_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.'
_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'
_ERROR_INCORRECT_TABLE_IN_BATCH = \
    'Table should be the same in a batch operation'
_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \
    'Partition Key should be the same in a batch operation'
_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \
    'Row Keys should not be the same in a batch operation'
_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE = \
'Message is not peek locked and cannot be deleted.'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK = \
'Message is not peek locked and cannot be unlocked.'
_ERROR_QUEUE_NOT_FOUND = 'Queue was not found'
_ERROR_TOPIC_NOT_FOUND = 'Topic was not found'
_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_SERVICEBUS_MISSING_INFO = \
'You need to provide servicebus namespace, access key and Issuer'
_ERROR_STORAGE_MISSING_INFO = \
'You need to provide both account name and access key'
_ERROR_ACCESS_POLICY = \
'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
'instance'
_WARNING_VALUE_SHOULD_BE_BYTES = \
'Warning: {0} must be bytes data type. It will be converted ' + \
'automatically, with utf-8 text encoding.'
_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \
'Cannot serialize the specified value ({0}) to an entity. Please use ' + \
'an EntityProperty (which can specify custom types), int, str, bool, ' + \
'or datetime.'
_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \
'Invalid page blob size: {0}. ' + \
'The size must be aligned to a 512-byte boundary.'
_USER_AGENT_STRING = 'pyazure/' + __version__
METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'
class WindowsAzureData(object):
    ''' This is the base of the data classes.
    It is only used to check whether an object is an instance of one. '''
pass
class WindowsAzureError(Exception):
    ''' WindowsAzure Exception base class. '''
def __init__(self, message):
super(WindowsAzureError, self).__init__(message)
class WindowsAzureConflictError(WindowsAzureError):
'''Indicates that the resource could not be created because it already
exists'''
def __init__(self, message):
super(WindowsAzureConflictError, self).__init__(message)
class WindowsAzureMissingResourceError(WindowsAzureError):
    '''Indicates that a request for a resource (queue, table,
    container, etc...) failed because the specified resource does not exist'''
def __init__(self, message):
super(WindowsAzureMissingResourceError, self).__init__(message)
class WindowsAzureBatchOperationError(WindowsAzureError):
'''Indicates that a batch operation failed'''
def __init__(self, message, code):
super(WindowsAzureBatchOperationError, self).__init__(message)
self.code = code
class Feed(object):
pass
class _Base64String(str):
pass
class HeaderDict(dict):
def __getitem__(self, index):
return super(HeaderDict, self).__getitem__(index.lower())
def _encode_base64(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
encoded = base64.b64encode(data)
return encoded.decode('utf-8')
def _decode_base64_to_bytes(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
return base64.b64decode(data)
def _decode_base64_to_text(data):
decoded_bytes = _decode_base64_to_bytes(data)
return decoded_bytes.decode('utf-8')
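# Example round-trip through the helpers above: _encode_base64(u'data')
# returns u'ZGF0YQ==', and _decode_base64_to_text(u'ZGF0YQ==') returns
# u'data' again.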
def _get_readable_id(id_name, id_prefix_to_skip):
"""simplified an id to be more friendly for us people"""
# id_name is in the form 'https://namespace.host.suffix/name'
# where name may contain a forward slash!
pos = id_name.find('//')
if pos != -1:
pos += 2
if id_prefix_to_skip:
pos = id_name.find(id_prefix_to_skip, pos)
if pos != -1:
pos += len(id_prefix_to_skip)
pos = id_name.find('/', pos)
if pos != -1:
return id_name[pos + 1:]
return id_name
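# Examples with hypothetical ids:
#   _get_readable_id('https://myns.servicebus.windows.net/myqueue', None)
#       returns 'myqueue'
#   _get_readable_id('https://myns.host.net/prefix/name', '/prefix')
#       returns 'name'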
def _get_entry_properties_from_node(entry, include_id, id_prefix_to_skip=None, use_title_as_id=False):
''' get properties from entry xml '''
properties = {}
etag = entry.getAttributeNS(METADATA_NS, 'etag')
if etag:
properties['etag'] = etag
for updated in _get_child_nodes(entry, 'updated'):
properties['updated'] = updated.firstChild.nodeValue
for name in _get_children_from_path(entry, 'author', 'name'):
if name.firstChild is not None:
properties['author'] = name.firstChild.nodeValue
if include_id:
if use_title_as_id:
for title in _get_child_nodes(entry, 'title'):
properties['name'] = title.firstChild.nodeValue
else:
for id in _get_child_nodes(entry, 'id'):
properties['name'] = _get_readable_id(
id.firstChild.nodeValue, id_prefix_to_skip)
return properties
def _get_entry_properties(xmlstr, include_id, id_prefix_to_skip=None):
''' get properties from entry xml '''
xmldoc = minidom.parseString(xmlstr)
properties = {}
for entry in _get_child_nodes(xmldoc, 'entry'):
properties.update(_get_entry_properties_from_node(entry, include_id, id_prefix_to_skip))
return properties
def _get_first_child_node_value(parent_node, node_name):
xml_attrs = _get_child_nodes(parent_node, node_name)
if xml_attrs:
xml_attr = xml_attrs[0]
if xml_attr.firstChild:
value = xml_attr.firstChild.nodeValue
return value
def _get_child_nodes(node, tagName):
return [childNode for childNode in node.getElementsByTagName(tagName)
if childNode.parentNode == node]
def _get_children_from_path(node, *path):
'''descends through a hierarchy of nodes returning the list of children
at the inner most level. Only returns children who share a common parent,
not cousins.'''
cur = node
for index, child in enumerate(path):
if isinstance(child, _strtype):
next = _get_child_nodes(cur, child)
else:
next = _get_child_nodesNS(cur, *child)
if index == len(path) - 1:
return next
elif not next:
break
cur = next[0]
return []
def _get_child_nodesNS(node, ns, tagName):
return [childNode for childNode in node.getElementsByTagNameNS(ns, tagName)
if childNode.parentNode == node]
def _create_entry(entry_body):
''' Adds common part of entry to a given entry body and return the whole
xml. '''
updated_str = datetime.utcnow().isoformat()
if datetime.utcnow().utcoffset() is None:
updated_str += '+00:00'
entry_start = '''<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom" >
<title /><updated>{updated}</updated><author><name /></author><id />
<content type="application/xml">
{body}</content></entry>'''
return entry_start.format(updated=updated_str, body=entry_body)
def _to_datetime(strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
_KNOWN_SERIALIZATION_XFORMS = {
'include_apis': 'IncludeAPIs',
'message_id': 'MessageId',
'content_md5': 'Content-MD5',
'last_modified': 'Last-Modified',
'cache_control': 'Cache-Control',
'account_admin_live_email_id': 'AccountAdminLiveEmailId',
'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
'subscription_id': 'SubscriptionID',
'fqdn': 'FQDN',
'private_id': 'PrivateID',
'os_virtual_hard_disk': 'OSVirtualHardDisk',
'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
'logical_size_in_gb': 'LogicalSizeInGB',
'os': 'OS',
'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
'copy_id': 'CopyId',
}
def _get_serialization_name(element_name):
"""converts a Python name into a serializable name"""
known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
if known is not None:
return known
if element_name.startswith('x_ms_'):
return element_name.replace('_', '-')
if element_name.endswith('_id'):
element_name = element_name.replace('_id', 'ID')
for name in ['content_', 'last_modified', 'if_', 'cache_control']:
if element_name.startswith(name):
element_name = element_name.replace('_', '-_')
return ''.join(name.capitalize() for name in element_name.split('_'))
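# Examples of the rules above (hypothetical attribute names):
#   _get_serialization_name('message_id')     returns 'MessageId'  (known xform)
#   _get_serialization_name('x_ms_blob_type') returns 'x-ms-blob-type'
#   _get_serialization_name('service_name')   returns 'ServiceName'
#   _get_serialization_name('content_type')   returns 'Content-Type'
# The last case works because the 'content_' prefix has its '_' rewritten to
# '-_', so the final split('_')/capitalize pass yields the hyphenated name.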
if sys.version_info < (3,):
_unicode_type = unicode
def _str(value):
if isinstance(value, unicode):
return value.encode('utf-8')
return str(value)
else:
_str = str
_unicode_type = str
def _str_or_none(value):
if value is None:
return None
return _str(value)
def _int_or_none(value):
if value is None:
return None
return str(int(value))
def _bool_or_none(value):
if value is None:
return None
if isinstance(value, bool):
if value:
return 'true'
else:
return 'false'
return str(value)
def _convert_class_to_xml(source, xml_prefix=True):
if source is None:
return ''
xmlstr = ''
if xml_prefix:
xmlstr = '<?xml version="1.0" encoding="utf-8"?>'
if isinstance(source, list):
for value in source:
xmlstr += _convert_class_to_xml(value, False)
elif isinstance(source, WindowsAzureData):
class_name = source.__class__.__name__
xmlstr += '<' + class_name + '>'
for name, value in vars(source).items():
if value is not None:
if isinstance(value, list) or \
isinstance(value, WindowsAzureData):
xmlstr += _convert_class_to_xml(value, False)
else:
xmlstr += ('<' + _get_serialization_name(name) + '>' +
xml_escape(str(value)) + '</' +
_get_serialization_name(name) + '>')
xmlstr += '</' + class_name + '>'
return xmlstr
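# A minimal sketch of the serializer above (hypothetical class):
#
#   class Probe(WindowsAzureData):
#       def __init__(self):
#           self.protocol = 'TCP'
#
#   _convert_class_to_xml(Probe(), xml_prefix=False)
#   # returns '<Probe><Protocol>TCP</Protocol></Probe>'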
def _find_namespaces_from_child(parent, child, namespaces):
"""Recursively searches from the parent to the child,
gathering all the applicable namespaces along the way"""
for cur_child in parent.childNodes:
if cur_child is child:
return True
if _find_namespaces_from_child(cur_child, child, namespaces):
            # cur_child is an ancestor of the target node; gather its
            # namespace declarations
for key in cur_child.attributes.keys():
if key.startswith('xmlns:') or key == 'xmlns':
namespaces[key] = cur_child.attributes[key]
break
return False
def _find_namespaces(parent, child):
res = {}
for key in parent.documentElement.attributes.keys():
if key.startswith('xmlns:') or key == 'xmlns':
res[key] = parent.documentElement.attributes[key]
_find_namespaces_from_child(parent, child, res)
return res
def _clone_node_with_namespaces(node_to_clone, original_doc):
clone = node_to_clone.cloneNode(True)
for key, value in _find_namespaces(original_doc, node_to_clone).items():
clone.attributes[key] = value
return clone
def _convert_response_to_feeds(response, convert_callback):
if response is None:
return None
feeds = _list_of(Feed)
x_ms_continuation = HeaderDict()
for name, value in response.headers:
if 'x-ms-continuation' in name:
x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value
if x_ms_continuation:
setattr(feeds, 'x_ms_continuation', x_ms_continuation)
xmldoc = minidom.parseString(response.body)
xml_entries = _get_children_from_path(xmldoc, 'feed', 'entry')
if not xml_entries:
# in some cases, response contains only entry but no feed
xml_entries = _get_children_from_path(xmldoc, 'entry')
if inspect.isclass(convert_callback) and issubclass(convert_callback, WindowsAzureData):
for xml_entry in xml_entries:
return_obj = convert_callback()
for node in _get_children_from_path(xml_entry,
'content',
convert_callback.__name__):
_fill_data_to_return_object(node, return_obj)
for name, value in _get_entry_properties_from_node(xml_entry,
include_id=True,
use_title_as_id=True).items():
setattr(return_obj, name, value)
feeds.append(return_obj)
else:
for xml_entry in xml_entries:
new_node = _clone_node_with_namespaces(xml_entry, xmldoc)
feeds.append(convert_callback(new_node.toxml('utf-8')))
return feeds
def _validate_type_bytes(param_name, param):
if not isinstance(param, bytes):
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _validate_not_none(param_name, param):
if param is None:
raise TypeError(_ERROR_VALUE_NONE.format(param_name))
def _fill_list_of(xmldoc, element_type, xml_element_name):
xmlelements = _get_child_nodes(xmldoc, xml_element_name)
return [_parse_response_body_from_xml_node(xmlelement, element_type) \
for xmlelement in xmlelements]
def _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name,
xml_element_name):
'''Converts an xml fragment into a list of scalar types. The parent xml
element contains a flat list of xml elements which are converted into the
specified scalar type and added to the list.
Example:
xmldoc=
<Endpoints>
<Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>
<Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint>
<Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint>
</Endpoints>
element_type=str
parent_xml_element_name='Endpoints'
xml_element_name='Endpoint'
'''
xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)
if xmlelements:
xmlelements = _get_child_nodes(xmlelements[0], xml_element_name)
return [_get_node_value(xmlelement, element_type) \
for xmlelement in xmlelements]
def _fill_dict(xmldoc, element_name):
xmlelements = _get_child_nodes(xmldoc, element_name)
if xmlelements:
return_obj = {}
for child in xmlelements[0].childNodes:
if child.firstChild:
return_obj[child.nodeName] = child.firstChild.nodeValue
return return_obj
def _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name,
key_xml_element_name, value_xml_element_name):
'''Converts an xml fragment into a dictionary. The parent xml element
contains a list of xml elements where each element has a child element for
the key, and another for the value.
Example:
xmldoc=
<ExtendedProperties>
<ExtendedProperty>
<Name>Ext1</Name>
<Value>Val1</Value>
</ExtendedProperty>
<ExtendedProperty>
<Name>Ext2</Name>
<Value>Val2</Value>
</ExtendedProperty>
</ExtendedProperties>
element_type=str
parent_xml_element_name='ExtendedProperties'
pair_xml_element_name='ExtendedProperty'
key_xml_element_name='Name'
value_xml_element_name='Value'
'''
return_obj = {}
xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)
if xmlelements:
xmlelements = _get_child_nodes(xmlelements[0], pair_xml_element_name)
for pair in xmlelements:
keys = _get_child_nodes(pair, key_xml_element_name)
values = _get_child_nodes(pair, value_xml_element_name)
if keys and values:
key = keys[0].firstChild.nodeValue
value = values[0].firstChild.nodeValue
return_obj[key] = value
return return_obj
def _fill_instance_child(xmldoc, element_name, return_type):
'''Converts a child of the current dom element to the specified type.
'''
xmlelements = _get_child_nodes(
xmldoc, _get_serialization_name(element_name))
if not xmlelements:
return None
return_obj = return_type()
_fill_data_to_return_object(xmlelements[0], return_obj)
return return_obj
def _fill_instance_element(element, return_type):
"""Converts a DOM element into the specified object"""
return _parse_response_body_from_xml_node(element, return_type)
def _fill_data_minidom(xmldoc, element_name, data_member):
xmlelements = _get_child_nodes(
xmldoc, _get_serialization_name(element_name))
if not xmlelements or not xmlelements[0].childNodes:
return None
value = xmlelements[0].firstChild.nodeValue
if data_member is None:
return value
elif isinstance(data_member, datetime):
return _to_datetime(value)
elif type(data_member) is bool:
return value.lower() != 'false'
else:
return type(data_member)(value)
def _get_node_value(xmlelement, data_type):
value = xmlelement.firstChild.nodeValue
if data_type is datetime:
return _to_datetime(value)
elif data_type is bool:
return value.lower() != 'false'
else:
return data_type(value)
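# Note on bool handling above: only the literal string 'false' (compared
# case-insensitively) parses as False; any other text, e.g. 'true' or '1',
# yields True.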
def _get_request_body_bytes_only(param_name, param_value):
'''Validates the request body passed in and converts it to bytes
if our policy allows it.'''
if param_value is None:
return b''
if isinstance(param_value, bytes):
return param_value
# Previous versions of the SDK allowed data types other than bytes to be
# passed in, and they would be auto-converted to bytes. We preserve this
# behavior when running under 2.7, but issue a warning.
# Python 3 support is new, so we reject anything that's not bytes.
if sys.version_info < (3,):
warnings.warn(_WARNING_VALUE_SHOULD_BE_BYTES.format(param_name))
return _get_request_body(param_value)
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _get_request_body(request_body):
'''Converts an object into a request body. If it's None
we'll return an empty string, if it's one of our objects it'll
convert it to XML and return it. Otherwise we just use the object
directly'''
if request_body is None:
return b''
if isinstance(request_body, WindowsAzureData):
request_body = _convert_class_to_xml(request_body)
if isinstance(request_body, bytes):
return request_body
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
request_body = str(request_body)
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
return request_body
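# Conversion summary (illustrative):
#   _get_request_body(None)      returns b''
#   _get_request_body(b'raw')    returns b'raw'
#   _get_request_body(u'text')   returns b'text' (utf-8 encoded)
#   _get_request_body(obj)       returns the XML for obj, utf-8 encoded,
#                                when obj is a WindowsAzureData instance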
def _parse_enum_results_list(response, return_type, resp_type, item_type):
"""resp_body is the XML we received
resp_type is a string, such as Containers,
return_type is the type we're constructing, such as ContainerEnumResults
item_type is the type object of the item to be created, such as Container
This function then returns a ContainerEnumResults object with the
containers member populated with the results.
"""
# parsing something like:
# <EnumerationResults ... >
# <Queues>
# <Queue>
# <Something />
# <SomethingElse />
# </Queue>
# </Queues>
# </EnumerationResults>
respbody = response.body
return_obj = return_type()
doc = minidom.parseString(respbody)
items = []
for enum_results in _get_child_nodes(doc, 'EnumerationResults'):
# path is something like Queues, Queue
for child in _get_children_from_path(enum_results,
resp_type,
resp_type[:-1]):
items.append(_fill_instance_element(child, item_type))
for name, value in vars(return_obj).items():
            # queues, Queues: this is the list itself, which we populated
            # above
if name == resp_type.lower():
                # the list itself.
continue
value = _fill_data_minidom(enum_results, name, value)
if value is not None:
setattr(return_obj, name, value)
setattr(return_obj, resp_type.lower(), items)
return return_obj
def _parse_simple_list(response, type, item_type, list_name):
respbody = response.body
res = type()
res_items = []
doc = minidom.parseString(respbody)
type_name = type.__name__
item_name = item_type.__name__
for item in _get_children_from_path(doc, type_name, item_name):
res_items.append(_fill_instance_element(item, item_type))
setattr(res, list_name, res_items)
return res
def _parse_response(response, return_type):
'''
Parse the HTTPResponse's body and fill all the data into a class of
return_type.
'''
return _parse_response_body_from_xml_text(response.body, return_type)
def _parse_service_resources_response(response, return_type):
'''
Parse the HTTPResponse's body and fill all the data into a class of
return_type.
'''
return _parse_response_body_from_service_resources_xml_text(response.body, return_type)
def _fill_data_to_return_object(node, return_obj):
members = dict(vars(return_obj))
for name, value in members.items():
if isinstance(value, _list_of):
setattr(return_obj,
name,
_fill_list_of(node,
value.list_type,
value.xml_element_name))
elif isinstance(value, _scalar_list_of):
setattr(return_obj,
name,
_fill_scalar_list_of(node,
value.list_type,
_get_serialization_name(name),
value.xml_element_name))
elif isinstance(value, _dict_of):
setattr(return_obj,
name,
_fill_dict_of(node,
_get_serialization_name(name),
value.pair_xml_element_name,
value.key_xml_element_name,
value.value_xml_element_name))
elif isinstance(value, _xml_attribute):
real_value = None
if node.hasAttribute(value.xml_element_name):
real_value = node.getAttribute(value.xml_element_name)
if real_value is not None:
setattr(return_obj, name, real_value)
elif isinstance(value, WindowsAzureData):
setattr(return_obj,
name,
_fill_instance_child(node, name, value.__class__))
elif isinstance(value, dict):
setattr(return_obj,
name,
_fill_dict(node, _get_serialization_name(name)))
elif isinstance(value, _Base64String):
value = _fill_data_minidom(node, name, '')
if value is not None:
value = _decode_base64_to_text(value)
# always set the attribute, so we don't end up returning an object
# with type _Base64String
setattr(return_obj, name, value)
else:
value = _fill_data_minidom(node, name, value)
if value is not None:
setattr(return_obj, name, value)
def _parse_response_body_from_xml_node(node, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
return_obj = return_type()
_fill_data_to_return_object(node, return_obj)
return return_obj
def _parse_response_body_from_xml_text(respbody, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
doc = minidom.parseString(respbody)
return_obj = return_type()
xml_name = return_type._xml_name if hasattr(return_type, '_xml_name') else return_type.__name__
for node in _get_child_nodes(doc, xml_name):
_fill_data_to_return_object(node, return_obj)
return return_obj
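# Illustrative sketch (hypothetical class and XML):
#
#   class HostedService(WindowsAzureData):
#       def __init__(self):
#           self.service_name = u''
#
#   svc = _parse_response_body_from_xml_text(
#       '<HostedService><ServiceName>mysvc</ServiceName></HostedService>',
#       HostedService)
#   # svc.service_name == u'mysvc'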
def _parse_response_body_from_service_resources_xml_text(respbody, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
doc = minidom.parseString(respbody)
return_obj = _list_of(return_type)
for node in _get_children_from_path(doc, "ServiceResources", "ServiceResource"):
local_obj = return_type()
_fill_data_to_return_object(node, local_obj)
return_obj.append(local_obj)
return return_obj
class _dict_of(dict):
"""a dict which carries with it the xml element names for key,val.
Used for deserializaion and construction of the lists"""
def __init__(self, pair_xml_element_name, key_xml_element_name,
value_xml_element_name):
self.pair_xml_element_name = pair_xml_element_name
self.key_xml_element_name = key_xml_element_name
self.value_xml_element_name = value_xml_element_name
super(_dict_of, self).__init__()
class _list_of(list):
"""a list which carries with it the type that's expected to go in it.
Used for deserializaion and construction of the lists"""
def __init__(self, list_type, xml_element_name=None):
self.list_type = list_type
if xml_element_name is None:
self.xml_element_name = list_type.__name__
else:
self.xml_element_name = xml_element_name
super(_list_of, self).__init__()
class _scalar_list_of(list):
"""a list of scalar types which carries with it the type that's
expected to go in it along with its xml element name.
Used for deserializaion and construction of the lists"""
def __init__(self, list_type, xml_element_name):
self.list_type = list_type
self.xml_element_name = xml_element_name
super(_scalar_list_of, self).__init__()
class _xml_attribute:
"""a accessor to XML attributes
expected to go in it along with its xml element name.
Used for deserialization and construction"""
def __init__(self, xml_element_name):
self.xml_element_name = xml_element_name
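# Sketch of how these marker types drive _fill_data_to_return_object
# (hypothetical model class):
#
#   class StorageService(WindowsAzureData):
#       def __init__(self):
#           self.endpoints = _scalar_list_of(str, 'Endpoint')
#           self.extended_properties = _dict_of(
#               'ExtendedProperty', 'Name', 'Value')
#           self.etag = _xml_attribute('etag')
#
# During deserialization the endpoints list is filled from
# <Endpoints><Endpoint>...</Endpoint></Endpoints>, the dict from
# <ExtendedProperties> name/value pairs, and etag from the node's
# 'etag' XML attribute.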
def _update_request_uri_query_local_storage(request, use_local_storage):
''' create correct uri and query for the request '''
uri, query = _update_request_uri_query(request)
if use_local_storage:
return '/' + DEV_ACCOUNT_NAME + uri, query
return uri, query
def _update_request_uri_query(request):
'''pulls the query string out of the URI and moves it into
the query portion of the request object. If there are already
query parameters on the request the parameters in the URI will
appear after the existing parameters'''
if '?' in request.path:
request.path, _, query_string = request.path.partition('?')
if query_string:
query_params = query_string.split('&')
for query in query_params:
if '=' in query:
name, _, value = query.partition('=')
request.query.append((name, value))
request.path = url_quote(request.path, '/()$=\',')
# add encoded queries to request.path.
if request.query:
request.path += '?'
for name, value in request.query:
if value is not None:
request.path += name + '=' + url_quote(value, '/()$=\',') + '&'
request.path = request.path[:-1]
return request.path, request.query
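# Example with a hypothetical request object:
#   before: request.path = '/container/blob?comp=list', request.query = []
#   after:  request.path  = '/container/blob?comp=list'
#           request.query = [('comp', 'list')]
# The query string is parsed into request.query, the path is url-quoted,
# and the (quoted) parameters are re-appended to the path.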
def _dont_fail_on_exist(error):
''' don't throw exception if the resource exists.
This is called by create_* APIs with fail_on_exist=False'''
if isinstance(error, WindowsAzureConflictError):
return False
else:
raise error
def _dont_fail_not_exist(error):
''' don't throw exception if the resource doesn't exist.
    This is called by delete_* APIs with fail_not_exist=False'''
if isinstance(error, WindowsAzureMissingResourceError):
return False
else:
raise error
def _general_error_handler(http_error):
''' Simple error handler for azure.'''
if http_error.status == 409:
raise WindowsAzureConflictError(
_ERROR_CONFLICT.format(str(http_error)))
elif http_error.status == 404:
raise WindowsAzureMissingResourceError(
_ERROR_NOT_FOUND.format(str(http_error)))
else:
if http_error.respbody is not None:
raise WindowsAzureError(
_ERROR_UNKNOWN.format(str(http_error)) + '\n' + \
http_error.respbody.decode('utf-8'))
else:
raise WindowsAzureError(_ERROR_UNKNOWN.format(str(http_error)))
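# Illustrative mapping of the handler above:
#   HTTP 409 raises WindowsAzureConflictError
#   HTTP 404 raises WindowsAzureMissingResourceError
#   any other status raises WindowsAzureError, with the response body
#   appended to the message when one is available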
def _parse_response_for_dict(response):
''' Extracts name-values from response header. Filter out the standard
http headers.'''
if response is None:
return None
http_headers = ['server', 'date', 'location', 'host',
'via', 'proxy-connection', 'connection']
return_dict = HeaderDict()
if response.headers:
for name, value in response.headers:
if not name.lower() in http_headers:
return_dict[name] = value
return return_dict
def _parse_response_for_dict_prefix(response, prefixes):
''' Extracts name-values for names starting with prefix from response
header. Filter out the standard http headers.'''
if response is None:
return None
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
for name, value in orig_dict.items():
for prefix_value in prefixes:
if name.lower().startswith(prefix_value.lower()):
return_dict[name] = value
break
return return_dict
else:
return None
def _parse_response_for_dict_filter(response, filter):
''' Extracts name-values for names in filter from response header. Filter
out the standard http headers.'''
if response is None:
return None
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
for name, value in orig_dict.items():
if name.lower() in filter:
return_dict[name] = value
return return_dict
else:
return None
def _sign_string(key, string_to_sign, key_is_base64=True):
if key_is_base64:
key = _decode_base64_to_bytes(key)
else:
if isinstance(key, _unicode_type):
key = key.encode('utf-8')
if isinstance(string_to_sign, _unicode_type):
string_to_sign = string_to_sign.encode('utf-8')
signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
digest = signed_hmac_sha256.digest()
encoded_digest = _encode_base64(digest)
return encoded_digest
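# Typical use for Shared Key style signing (illustrative):
#
#   signature = _sign_string(DEV_ACCOUNT_KEY, string_to_sign)
#
# string_to_sign is the canonicalized request; account keys are base64,
# so key_is_base64 is left at its default of True.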
| apache-2.0 |
arenadata/ambari | ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_extension/HIVE/package/scripts/status_params.py | 25 | 1062 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
config = Script.get_config()
hive_pid_dir = config['configurations']['global']['hive_pid_dir']
hive_pid = 'hive-server.pid'
hive_metastore_pid = 'hive.pid'
hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir
| apache-2.0 |
awacha/cct | cct/qtgui/devices/motor/movemotor/movemotor.py | 1 | 4527 | import logging
from PyQt5 import QtWidgets, QtGui
from .movemotor_ui import Ui_Form
from ....core.mixins import ToolWindow
from .....core.devices import Motor
from .....core.instrument.privileges import PRIV_MOVEMOTORS
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MoveMotor(QtWidgets.QWidget, Ui_Form, ToolWindow):
required_privilege = PRIV_MOVEMOTORS
def __init__(self, *args, **kwargs):
credo = kwargs.pop('credo')
self.motorname = kwargs.pop('motorname')
QtWidgets.QWidget.__init__(self, *args, **kwargs)
self.setupToolWindow(credo, required_devices=['Motor_' + self.motorname])
self._start_requested = False
self.setupUi(self)
def setupUi(self, Form):
Ui_Form.setupUi(self, Form)
self.motorComboBox.addItems(sorted(self.credo.motors.keys()))
self.motorComboBox.currentTextChanged.connect(self.onMotorSelected)
self.movePushButton.clicked.connect(self.onMove)
self.motorComboBox.setCurrentIndex(self.motorComboBox.findText(self.motorname))
self.relativeCheckBox.toggled.connect(self.onRelativeChanged)
self.targetDoubleSpinBox.editingFinished.connect(self.onEditingFinished)
self.onMotorSelected()
self.adjustSize()
def onEditingFinished(self):
if self.targetDoubleSpinBox.hasFocus():
self.onMove()
def onRelativeChanged(self):
self.onMotorPositionChange(self.motor(), self.motor().where())
if self.relativeCheckBox.isChecked():
self.targetDoubleSpinBox.setValue(0)
else:
self.targetDoubleSpinBox.setValue(self.motor().where())
self.adjustSize()
def setIdle(self):
super().setIdle()
self.movePushButton.setText('Move')
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/motor.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.movePushButton.setIcon(icon)
self.targetDoubleSpinBox.setEnabled(True)
self.motorComboBox.setEnabled(True)
self.relativeCheckBox.setEnabled(True)
self.movePushButton.setEnabled(True)
self._start_requested = False
def setBusy(self):
self.movePushButton.setText('Stop')
self.movePushButton.setIcon(QtGui.QIcon.fromTheme('process-stop'))
self.targetDoubleSpinBox.setEnabled(False)
self.motorComboBox.setEnabled(False)
self.relativeCheckBox.setEnabled(False)
self.movePushButton.setEnabled(True)
super().setBusy()
def motor(self) -> Motor:
return self.credo.motors[self.motorComboBox.currentText()]
def onMove(self):
if self.movePushButton.text() == 'Move':
self.movePushButton.setEnabled(False)
self._start_requested = True
if self.relativeCheckBox.isChecked():
self.motor().moverel(self.targetDoubleSpinBox.value())
else:
self.motor().moveto(self.targetDoubleSpinBox.value())
else:
self.movePushButton.setEnabled(False)
self.motor().stop()
def onMotorStart(self, motor: Motor):
if self._start_requested:
self.setBusy()
def onMotorSelected(self):
self.setWindowTitle('Move motor {}'.format(self.motorComboBox.currentText()))
for d in self.required_devices:
self.unrequireDevice(d)
self.required_devices = ['Motor_' + self.motorComboBox.currentText()]
self.requireDevice(self.required_devices[0])
motor = self.credo.motors[self.motorComboBox.currentText()]
self.onMotorPositionChange(motor, motor.where())
if self.relativeCheckBox.isChecked():
self.targetDoubleSpinBox.setValue(0.0)
else:
self.targetDoubleSpinBox.setValue(motor.where())
def onMotorPositionChange(self, motor: Motor, newposition: float):
self.positionLabel.setText('<b>{:.4f}</b>'.format(newposition))
left = motor.get_variable('softleft')
right = motor.get_variable('softright')
if self.relativeCheckBox.isChecked():
left -= newposition
right -= newposition
self.targetDoubleSpinBox.setMinimum(left)
self.targetDoubleSpinBox.setMaximum(right)
self.leftLimitLabel.setText('{:.4f}'.format(left))
self.rightLimitLabel.setText('{:.4f}'.format(right))
self.adjustSize()
def onMotorStop(self, motor: Motor, targetpositionreached: bool):
self.setIdle()
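# A minimal sketch of instantiating this widget (hypothetical 'credo'
# instrument object; the real application constructs and owns it):
#
#   widget = MoveMotor(credo=credo, motorname='SampleX')
#   widget.show()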
| bsd-3-clause |